diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py
deleted file mode 100644
index 2a4bb41b016429d13debe94c67a76cc6112f154c..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import unittest
-import requests
-from unittest.mock import MagicMock
-from gpt4free.quora.api import retry_request
-
-
-class TestRetryRequest(unittest.TestCase):
- def test_successful_request(self):
- # Mock a successful request with a 200 status code
- mock_response = MagicMock()
- mock_response.status_code = 200
- requests.get = MagicMock(return_value=mock_response)
-
- # Call the function and assert that it returns the response
- response = retry_request(requests.get, "http://example.com", max_attempts=3)
- self.assertEqual(response.status_code, 200)
-
- def test_exponential_backoff(self):
- # Mock a failed request that succeeds after two retries
- mock_response = MagicMock()
- mock_response.status_code = 200
- requests.get = MagicMock(side_effect=[requests.exceptions.RequestException] * 2 + [mock_response])
-
- # Call the function and assert that it retries with exponential backoff
- with self.assertLogs() as logs:
- response = retry_request(requests.get, "http://example.com", max_attempts=3, delay=1)
- self.assertEqual(response.status_code, 200)
- self.assertGreaterEqual(len(logs.output), 2)
- self.assertIn("Retrying in 1 seconds...", logs.output[0])
- self.assertIn("Retrying in 2 seconds...", logs.output[1])
-
- def test_too_many_attempts(self):
- # Mock a failed request that never succeeds
- requests.get = MagicMock(side_effect=requests.exceptions.RequestException)
-
- # Call the function and assert that it raises an exception after the maximum number of attempts
- with self.assertRaises(RuntimeError):
- retry_request(requests.get, "http://example.com", max_attempts=3)
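The tests above exercise a retry_request helper imported from gpt4free.quora.api; its implementation is not part of this diff. A minimal sketch consistent with the behaviour the tests assert (return the response on success, log "Retrying in N seconds..." with a doubling delay on requests.exceptions.RequestException, and raise RuntimeError once the attempts are exhausted) could look like the following. Names and defaults are inferred from the tests, not taken from the real module.

import logging
import time

import requests


def retry_request(request_func, url, max_attempts=3, delay=1):
    """Call request_func(url), retrying with exponential backoff on RequestException."""
    current_delay = delay
    for attempt in range(max_attempts):
        try:
            return request_func(url)
        except requests.exceptions.RequestException:
            if attempt == max_attempts - 1:
                break  # attempts exhausted; fall through to the RuntimeError below
            # the message format matches what the tests look for in the captured logs
            logging.warning("Retrying in %d seconds...", current_delay)
            time.sleep(current_delay)
            current_delay *= 2
    raise RuntimeError(f"request failed after {max_attempts} attempts")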
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md
deleted file mode 100644
index aab03fd505bf0c333f95d5c4322e307917bbcae7..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage: A Comprehensive Guide
-
Adobe Photoshop is the most popular and powerful image editing software in the world. It allows you to create, edit, and enhance photos, graphics, and designs with a variety of tools and features. Whether you are a professional designer, photographer, or hobbyist, Adobe Photoshop can help you achieve your creative vision.
-
In this article, we will introduce you to Adobe Photoshop CC 2014, the latest version of the software, released in June 2014. We will explain what Adobe Photoshop CC 2014 is, what its main features are, how to install and activate it, and how to use it for design and photography. By the end of this article, you will have a better understanding of Adobe Photoshop CC 2014 and how to make the most of it.
-
Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage
Adobe Photoshop CC 2014 is the fifteenth major release of Adobe Photoshop, which is part of the Adobe Creative Cloud subscription service. It is also known as Adobe Photoshop 15 or Adobe Photoshop 2014. It is available for Windows and Mac OS X operating systems, and it supports both 32-bit and 64-bit architectures.
-
The main features of Adobe Photoshop CC 2014
-
Adobe Photoshop CC 2014 introduces several new features and enhancements that improve the performance, functionality, and usability of the software. Some of the most notable new features are:
-
-
Editing images directly in Adobe Camera Raw. This means that you no longer have to convert your raw files into Photoshop format before you can start editing them. Adobe Camera Raw is a powerful image editing tool that gives you complete control over your raw files.
-
Applying blur effects with Blur Gallery. This feature allows you to create realistic motion blur, spin blur, and path blur effects with ease. You can also use multiple blurs in one image and adjust them individually.
-
Enhancing typography with new controls and fonts. This feature gives you more options to customize your text, such as font size variations, font matching, smart quotes, hyphenation, and more. You can also access over 900 fonts from Typekit, a library of high-quality fonts that are integrated with Creative Cloud.
-
Creating and managing assets with Creative Cloud Libraries. This feature lets you create, categorize, and store your favorite colors, brushes, text styles, graphics, and vector images in one easily accessible place. Then you can access them anywhere: Assets you create under the same Adobe ID will be visible across different computers—in a variety of applications like Photoshop CC—wherever you sign in.
-
-
The system requirements for Adobe Photoshop CC 2014
-
To run Adobe Photoshop CC 2014 smoothly on your computer, you need to meet the following minimum system requirements:
-
| Operating system | Processor | RAM | Hard disk space | Graphics card |
| --- | --- | --- | --- | --- |
| Windows 7 SP1 or later (32-bit or 64-bit) | Intel Pentium 4 or AMD Athlon 64 processor (2 GHz or faster) | 2 GB (8 GB recommended) | 2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on removable flash storage devices) | 1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system |
| Mac OS X v10.7 or later (64-bit only) | Multicore Intel processor with 64-bit support | 2 GB (8 GB recommended) | 3.2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices) | 1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system |
-
How to install and activate Adobe Photoshop CC 2014?
-
To install and activate Adobe Photoshop CC 2014 on your computer, you need to follow these steps:
-
Adobe Photoshop CC 2014 crack download
-Adobe Photoshop CC 2014 multilingual portable
-Adobe Photoshop CC 2014 serial number
-Adobe Photoshop CC 2014 offline installer
-Adobe Photoshop CC 2014 free trial
-Adobe Photoshop CC 2014 full version
-Adobe Photoshop CC 2014 keygen
-Adobe Photoshop CC 2014 system requirements
-Adobe Photoshop CC 2014 tutorial
-Adobe Photoshop CC 2014 update
-Adobe Photoshop CC 2014 features
-Adobe Photoshop CC 2014 license key
-Adobe Photoshop CC 2014 activation code
-Adobe Photoshop CC 2014 patch
-Adobe Photoshop CC 2014 direct download link
-Adobe Photoshop CC 2014 torrent
-Adobe Photoshop CC 2014 mac
-Adobe Photoshop CC 2014 windows
-Adobe Photoshop CC 2014 x64 bit
-Adobe Photoshop CC 2014 x32 bit
-Adobe Photoshop CC 2014 latest version
-Adobe Photoshop CC 2014 review
-Adobe Photoshop CC 2014 tips and tricks
-Adobe Photoshop CC 2014 plugins
-Adobe Photoshop CC 2014 brushes
-Adobe Photoshop CC 2014 presets
-Adobe Photoshop CC 2014 filters
-Adobe Photoshop CC 2014 actions
-Adobe Photoshop CC 2014 fonts
-Adobe Photoshop CC 2014 tools
-Adobe Photoshop CC 2014 shortcuts
-Adobe Photoshop CC 2014 layers
-Adobe Photoshop CC 2014 masks
-Adobe Photoshop CC 2014 smart objects
-Adobe Photoshop CC 2014 adjustment layers
-Adobe Photoshop CC 2014 blending modes
-Adobe Photoshop CC 2014 selection tools
-Adobe Photoshop CC 2014 transform tools
-Adobe Photoshop CC 2014 crop tool
-Adobe Photoshop CC 2014 healing tools
-Adobe Photoshop CC 2014 clone stamp tool
-Adobe Photoshop CC 2014 pen tool
-Adobe Photoshop CC 2014 text tool
-Adobe Photoshop CC 2014 shape tool
-Adobe Photoshop CC 2014 gradient tool
-Adobe Photoshop CC 2014 paint bucket tool
-Adobe Photoshop CC 2014 eraser tool
-Adobe Photoshop CC 2014 dodge and burn tools
-Adobe Photoshop CC 2014 sponge tool
-
Downloading the setup files
-
You can download the setup files for Adobe Photoshop CC 2014 from the official website of Adobe or from other trusted sources online. Make sure you download the correct version for your operating system and architecture (32-bit or 64-bit). The setup files are usually compressed in ZIP or RAR format, so you need to extract them before installing.
-
Installing Adobe Photoshop CC 2014
-
To install Adobe Photoshop CC 2014 on your computer, you need to run the setup.exe file that you extracted from the downloaded file. Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation is finished.
-
Activating Adobe Photoshop CC 2014 with a serial number or a patch
-
To activate Adobe Photoshop CC 2014 on your computer, you need to have a valid serial number or a patch that can bypass the activation process. A serial number is a unique code that identifies your license for using the software. A patch is a small program that modifies the original software code to remove the activation requirement.
-
You can obtain a serial number or a patch from various sources online, such as forums, blogs, or websites that offer cracked software. However, be careful when downloading these files as they may contain viruses or malware that can harm your computer. Also, using cracked software is illegal and unethical as it violates the terms and conditions of Adobe.
-
If you have a serial number for Adobe Photoshop CC 2014, you can enter it when prompted during the installation process or after launching the software for the first time. If you have a patch for Adobe Photoshop CC
0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md
deleted file mode 100644
index fd1b337e795a23e63573ed3fd0482a0f5ff547c7..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
E Elio Le Story Tese Torrent: How to Download Their Music for Free
-
-
Elio e le Storie Tese is an Italian comedy rock band that was formed in 1980. The band is known for their humorous and satirical lyrics, their eclectic musical style, and their live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations. Some of their most popular songs are "La terra dei cachi", "Mio cuggino", "Born to Be Abramo", and "La canzone mononota".
-
-
If you are a fan of Elio e le Storie Tese and you want to download their music for free, you might be tempted to use a torrent site or a file-sharing platform that hosts pirated copies of their albums. However, this is not a legal or safe way to get their music. You might be breaking the law, violating the rights of the band and their record label, exposing yourself to malware or viruses, or risking legal troubles or penalties.
The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. Some of the popular streaming platforms that have Elio e le Storie Tese in their library are:
-
-
-
Spotify: You can listen to Elio e le Storie Tese on Spotify for free with ads, or you can upgrade to Spotify Premium for ad-free listening, offline mode, and other features.
-
Apple Music: You can listen to Elio e le Storie Tese on Apple Music with a subscription fee, or you can try it for free for three months.
-
Deezer: You can listen to Elio e le Storie Tese on Deezer for free with ads, or you can upgrade to Deezer Premium for ad-free listening, offline mode, and other features.
-
YouTube Music: You can listen to Elio e le Storie Tese on YouTube Music for free with ads, or you can upgrade to YouTube Music Premium for ad-free listening, offline mode, and other features.
-
Amazon Music: You can listen to Elio e le Storie Tese on Amazon Music with a subscription fee, or you can try it for free for 30 days.
-
-
-
All these streaming services offer high-quality audio, as well as various payment options and customer support. However, they may not be available in all countries or regions, so you should check their availability and pricing before choosing one.
-
-
How to Download E Elio Le Story Tese Torrent Illegally
-
-
If you still want to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved. Some of the notorious websites that offer Elio e le Storie Tese torrents are:
-
-
-
RuTracker.org: This is a Russian torrent site that has a large collection of music torrents, including Elio e le Storie Tese albums in FLAC format.
-
Marok.org: This is an Italian torrent site that has some videos of Elio e le Storie Tese live performances.
-
Archive.org: This is a digital library that hosts various media files, including some audio files of Elio e le Storie Tese songs.
-
Direct-Download.com: This is a file-sharing platform that has a link to download Elio e le Storie Tese discography in RAR format.
-
-
-
These websites claim to provide free and fast downloads of E Elio Le Story Tese Torrent files in various formats and resolutions. However, they are not authorized by the original creators or distributors of the music, and they violate the copyright laws and intellectual property rights of the music industry. Moreover, they are risky and unsafe to use, as they may contain malware or viruses that can infect your device or steal your personal information. They may also expose you to legal troubles or penalties if you are caught downloading or sharing pirated content.
-
-
Conclusion
-
-
Elio e le Storie Tese is a band that will appeal to fans of comedy rock and Italian music. If you want to download their music for free, you have several options online, but not all of them are legal or safe. The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. However, if you choose to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved.
-
-
In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally or illegally. We hope you have enjoyed reading this article and found it useful. Now go ahead and download E Elio Le Story Tese Torrent and enjoy their music!
-
-
Why Elio e le Storie Tese is a Unique Band
-
-
Elio e le Storie Tese is not just a comedy rock band, but also a cultural phenomenon in Italy. The band has been praised for their originality, creativity, and versatility. They have experimented with various genres and styles, such as pop, rock, jazz, funk, metal, classical, folk, rap, and more. They have also collaborated with many famous artists and personalities, such as Luciano Pavarotti, Ennio Morricone, Giorgio Moroder, Renato Zero, Jovanotti, and Fabio Fazio.
-
-
Elio e le Storie Tese is also known for their social and political satire, their parody of Italian stereotypes and clichés, and their criticism of the Italian society and media. The band has often used irony, sarcasm, absurdity, and nonsense to convey their messages and opinions. They have also created many fictional characters and alter egos, such as Rocco Tanica, Faso, Cesareo, Mangoni, Feiez, Elio Samaga Hukapan Kariyana Turu (the Sri Lankan version of Elio), and Il Complesso Misterioso (a fake band that competed in the Sanremo Music Festival).
-
-
Elio e le Storie Tese is a band that has influenced many other artists and comedians in Italy and abroad. They have also received many awards and recognitions for their music and career. They have been nominated for several MTV Europe Music Awards and Italian Music Awards. They have also won the Critics' Award at the Sanremo Music Festival twice (in 1996 and 2013). In 2016, they announced their farewell tour, which ended in 2018 with a final concert in Milan.
-
-
How to Support Elio e le Storie Tese
-
-
If you love Elio e le Storie Tese and you want to support them, you can do so in various ways. Here are some suggestions:
-
-
-
Buy their music: You can buy their albums, singles, compilations, or special editions from their official website or from online stores such as Amazon or iTunes.
-
Watch their videos: You can watch their music videos, live performances, interviews, documentaries, or sketches on their official YouTube channel or on other platforms such as Vimeo or Dailymotion.
-
Follow them on social media: You can follow them on Facebook, Twitter, Instagram, or other social networks to get updates on their news, events, projects, or personal lives.
-
Join their fan club: You can join their official fan club "Elii" to get access to exclusive content, merchandise, discounts, contests, or meet-and-greets.
-
Donate to their causes: You can donate to their charitable causes or initiatives that they support or promote. For example, you can donate to the Fondazione Umberto Veronesi (a foundation that supports scientific research on cancer), to the Emergency (a humanitarian organization that provides medical care to victims of war and poverty), or to the Lega del Filo d'Oro (an association that helps deafblind people).
-
-
-
By supporting Elio e le Storie Tese, you are not only showing your appreciation for their music and artistry but also contributing to their legacy and impact on the Italian culture and society.
-
How to Discover More About Elio e le Storie Tese
-
-
If you are curious about Elio e le Storie Tese and you want to discover more about their music and history, you can do so in various ways. Here are some suggestions:
-
-
-
Read their books: You can read their books that contain their lyrics, stories, anecdotes, illustrations, or photos. Some of their books are "Elio Samaga Hukapan Kariyana Turu", "Gli Occhi del Cuore", "Il Mistero dei Bulli", and "La Risposta è Nelle Stelle".
-
Watch their movies: You can watch their movies that feature their songs, sketches, or appearances. Some of their movies are "Tutti Gli Uomini del Deficiente", "La Febbre del Sabato Sera", "Fuga da Reuma Park", and "Boris Il Film".
-
Listen to their podcasts: You can listen to their podcasts that cover various topics, such as music, cinema, literature, or current affairs. Some of their podcasts are "Elio e le Storie Tese Show", "Elio e le Storie Tese Radio Show", and "Elio e le Storie Tese Podcast".
-
Visit their website: You can visit their official website that contains their news, biography, discography, tour dates, merchandise, or contacts.
-
Subscribe to their newsletter: You can subscribe to their official newsletter that will send you updates on their activities, projects, or offers.
-
-
-
By discovering more about Elio e le Storie Tese, you are not only enriching your knowledge and appreciation for their music and artistry but also joining their loyal and passionate fan community.
-
-
How to Share E Elio Le Story Tese Torrent with Others
-
-
If you love E Elio Le Story Tese Torrent and you want to share it with others, you can do so in various ways. Here are some suggestions:
-
-
-
Create a playlist: You can create a playlist of your favorite Elio e le Storie Tese songs and share it with your friends or family on social media or streaming platforms.
-
Write a review: You can write a review of your favorite Elio e le Storie Tese album or song and share it with other fans or music lovers on blogs or forums.
-
Make a tribute: You can make a tribute to Elio e le Storie Tese by covering their songs, making a fan art, writing a fan fiction, or cosplaying their characters.
-
Attend a concert: You can attend one of their live concerts and enjoy their music and performance with other fans. You can also take photos or videos and share them online.
-
Recommend them: You can recommend Elio e le Storie Tese to someone who might like their music or style. You can also introduce them to some of their songs or albums that suit their taste or mood.
-
-
-
By sharing E Elio Le Story Tese Torrent with others, you are not only spreading your love and enthusiasm for their music and artistry but also supporting their career and success.
-
Conclusion
-
-
E Elio Le Story Tese Torrent is a keyword that refers to the illegal and unsafe way of downloading the music of Elio e le Storie Tese, an Italian comedy rock band that has been entertaining audiences since 1980. The band is known for their witty and satirical lyrics, their eclectic musical style, and their energetic live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations.
-
-
In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally and safely, how to discover more about Elio e le Storie Tese, and how to share their music with others. We hope you have enjoyed reading this article and found it useful. Now go ahead and enjoy E Elio Le Story Tese Torrent online!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md b/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md
deleted file mode 100644
index 7cbab362d0d231d19654830830050385a0796eb7..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md
+++ /dev/null
@@ -1,126 +0,0 @@
-
-
Attack on Titan Mod Free Download in APKPure
-
If you are a fan of the popular anime and manga series Attack on Titan, you might be interested in playing the game based on it. However, if you want to enjoy some extra features and enhancements, you might want to try the mod version of the game. In this article, we will show you how to download and install Attack on Titan mod free from APKPure, one of the best sources for Android apps and games.
Attack on Titan is a Japanese manga series written and illustrated by Hajime Isayama. It is set in a world where humanity lives inside cities surrounded by three enormous walls that protect them from gigantic man-eating humanoids referred to as Titans. The story follows Eren Yeager, who vows to exterminate the Titans after they bring about the destruction of his hometown and the death of his mother.
-
The manga series has been adapted into an anime television series, which has four seasons so far. The anime series has received critical acclaim and commercial success, winning several awards and becoming one of the best-selling manga series of all time.
-
The game based on the anime series is called Attack on Titan / A.O.T. Wings of Freedom. It is an action hack and slash game that lets you play as one of the beloved characters from the series. You can use the Three Dimensional Maneuver Gear to fly around and fight against the Titans. You can also discover the story from the anime, with some original twists, and experience the thrill of being in the anime.
-
attack on titan mod apk download for android
-attack on titan mod minecraft pe free download
-attack on titan mod apk unlimited money and gems
-attack on titan mod menu apk download latest version
-attack on titan mod for gta san andreas free download
-attack on titan mod apk offline no root
-attack on titan mod pack for minecraft java edition
-attack on titan mod apk rexdl
-attack on titan mod among us free download
-attack on titan mod apk obb highly compressed
-attack on titan mod for roblox free download
-attack on titan mod apk unlimited everything
-attack on titan mod for skyrim special edition
-attack on titan mod apk happymod
-attack on titan mod for gta 5 pc free download
-attack on titan mod apk revdl
-attack on titan mod for sims 4 free download
-attack on titan mod apk all characters unlocked
-attack on titan mod for fallout 4 xbox one
-attack on titan mod apk android 1
-attack on titan mod for terraria free download
-attack on titan mod apk god mode
-attack on titan mod for left 4 dead 2 free download
-attack on titan mod apk unlimited coins and diamonds
-attack on titan mod for stardew valley free download
-attack on titan mod apk no ads
-attack on titan mod for ark survival evolved free download
-attack on titan mod apk unlimited health and stamina
-attack on titan mod for subnautica free download
-attack on titan mod apk latest update
-attack on titan mod for starbound free download
-attack on titan mod apk no verification
-attack on titan mod for mount and blade warband free download
-attack on titan mod apk unlimited blades and gas
-attack on titan mod for rimworld free download
-attack on titan mod apk no human verification
-attack on titan mod for dragon age inquisition free download
-attack on titan mod apk unlimited skills and items
-attack on titan mod for xcom 2 free download
-attack on titan mod apk one hit kill
-attack on titan mod for witcher 3 free download
-attack on titan mod apk online multiplayer
-attack on titan mod for dark souls 3 free download
-attack on titan mod apk all episodes unlocked
-attack on titan mod for dying light free download
-attack on titan mod apk original version
-attack on titan mod for just cause 3 free download
-
Some of the main features and characters of the game are:
-
-
Play as Eren, Mikasa, Armin, Levi, Erwin, and more
-
Use various weapons and skills to defeat different types of Titans
-
Explore various locations from the anime, such as Shiganshina, Trost, Forest of Giant Trees, etc.
-
Enjoy stunning graphics and sound effects that match the anime style
-
Play solo or in cooperation with up to four players online
-
-
What is APKPure?
-
APKPure is a website that offers APK files for Android apps and games. APK stands for Android Package Kit, which is a file format that contains all the elements needed to install an app or game on your Android device. Normally, you would download apps and games from Google Play Store, which is the official source for Android apps. However, there are some reasons why you might want to use APKPure instead.
-
Some of the benefits of using APKPure are:
-
-
You can download apps and games that are not available in your country or region
-
You can download apps and games that are not compatible with your device or Android version
-
You can download apps and games that have been removed from Google Play Store
-
You can download older versions of apps and games that work better for you
-
You can download modded versions of apps and games that have extra features or enhancements
-
-
However, there are also some risks of using APKPure that you should be aware of:
-
-
You might download malicious or harmful files that can damage your device or steal your data
-
You might violate the terms and conditions of the app or game developers and face legal consequences
-
You might miss out on the updates and bug fixes that are provided by Google Play Store
-
You might encounter compatibility or performance issues with some apps or games
-
-
Therefore, you should always be careful and cautious when using APKPure or any other third-party source for Android apps and games. You should always check the ratings, reviews, and permissions of the files before downloading them. You should also scan the files with a reliable antivirus software before installing them. And you should always backup your data and device before trying any new app or game.
-
How to download and install Attack on Titan mod in APKPure?
-
If you want to try the mod version of Attack on Titan / A.O.T. Wings of Freedom, which has some extra features such as unlimited money, unlocked characters, and more, you can download it from APKPure website. Here are the steps to download and install Attack on Titan mod in APKPure:
-
-
Go to the APKPure website and search for Attack on Titan mod or click on this link: [Attack on Titan Mod APK 1.1.2.12 - Download Attack on Titan Mod for Android]
-
Click on the green Download APK button and wait for the file to be downloaded to your device
-
Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from Google Play Store
-
Locate the downloaded APK file in your device storage and tap on it to start the installation process
-
Follow the instructions on the screen and grant the necessary permissions to the app
-
Wait for the installation to finish and then launch the app from your app drawer or home screen
-
-
Congratulations! You have successfully downloaded and installed Attack on Titan mod in APKPure. You can now enjoy playing the game with some extra features and enhancements.
-
Conclusion
-
In this article, we have shown you how to download and install Attack on Titan mod free from APKPure, one of the best sources for Android apps and games. We have also explained what is Attack on Titan, what is APKPure, and what are the benefits and risks of using APKPure. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.
-
If you liked this article, please share it with your friends and family who might be interested in playing Attack on Titan mod. And if you want to read more articles like this, please subscribe to our newsletter or follow us on social media. Thank you for reading!
-
FAQs
-
What is an APK file and why do I need it?
-
An APK file is a file format that contains all the elements needed to install an app or game on your Android device. You need an APK file when you want to download an app or game that is not available in Google Play Store or that is not compatible with your device or Android version.
-
Is APKPure safe and reliable?
-
APKPure is one of the most popular and trusted websites that offers APK files for Android apps and games. It has millions of users and thousands of positive reviews. However, like any other third-party source, it also has some risks of downloading malicious or harmful files that can damage your device or steal your data. Therefore, you should always be careful and cautious when using APKPure or any other third-party source for Android apps and games.
-
What are the requirements and compatibility of Attack on Titan mod?
-
The requirements and compatibility of Attack on Titan mod are as follows:
-
-
The minimum Android version required is 4.0.3 (Ice Cream Sandwich) or higher
-
The minimum RAM required is 1 GB or higher
-
The minimum storage space required is 500 MB or higher
-
The app supports English, Japanese, Chinese, Korean, French, German, Spanish, Italian, Portuguese, Russian, Turkish, Arabic, Indonesian, Thai, Vietnamese languages
-
The app is compatible with most Android devices such as Samsung, Huawei, Xiaomi, LG, Sony, Motorola, etc.
-
-
What are the features and advantages of Attack on Titan mod?
-
The features and advantages of Attack on Titan mod are as follows:
-
-
You can get unlimited money to buy weapons, items, upgrades, etc.
-
You can unlock all the characters and skills to play as your favorite character
-
You can enjoy the game without any ads or interruptions
-
You can customize the game settings to suit your preferences and device performance
-
You can experience the game with some extra enhancements and improvements
-
-
How can I update or uninstall Attack on Titan mod?
-
If you want to update or uninstall Attack on Titan mod, you can follow these steps:
-
-
To update the app, you need to download the latest version of the APK file from APKPure website and install it over the existing app. You don't need to uninstall the previous version, but you should backup your data before updating
-
To uninstall the app, you need to go to your device settings and find the app in the list of installed apps. Then, you need to tap on the app and select the option to uninstall it. You should also delete the APK file from your device storage
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py
deleted file mode 100644
index 4b26f6ba130822c2de9d0a0a91e0cb1a9e6d79f9..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import re
-import jieba
-from pypinyin import pinyin, Style
-from text_to_speech.utils.text.text_norm import NSWNormalizer
-from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors
-from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme
-
-ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
- 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']
-
-
-@register_txt_processors('zh')
-class TxtProcessor(BaseTxtProcessor):
- table = {ord(f): ord(t) for f, t in zip(
- u':,。!?【】()%#@&1234567890',
- u':,.!?[]()%#@&1234567890')}
-
- @staticmethod
- def sp_phonemes():
- return ['|', '#']
-
- @staticmethod
- def preprocess_text(text):
- text = text.translate(TxtProcessor.table)
- text = NSWNormalizer(text).normalize(remove_punc=False).lower()
- text = re.sub("[\'\"()]+", "", text)
- text = re.sub("[-]+", " ", text)
- text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text)
- text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
- text = re.sub(f"([{PUNCS}])", r" \1 ", text)
- text = re.sub(rf"\s+", r"", text)
- text = re.sub(rf"[A-Za-z]+", r"$", text)
- return text
-
- @classmethod
- def pinyin_with_en(cls, txt, style):
- x = pinyin(txt, style)
- x = [t[0] for t in x]
- x_ = []
- for t in x:
- if '$' not in t:
- x_.append(t)
- else:
- x_ += list(t)
- x_ = [t if t != '$' else 'ENG' for t in x_]
- return x_
-
- @classmethod
- def process(cls, txt, pre_align_args):
- txt = cls.preprocess_text(txt)
-        txt = txt.replace("嗯", "蒽")  # pypinyin detects the initial and final of "嗯" as '', which misaligns ph2word, so substitute a stand-in character
- # https://blog.csdn.net/zhoulei124/article/details/89055403
-
- shengmu = cls.pinyin_with_en(txt, style=Style.INITIALS)
- yunmu = cls.pinyin_with_en(txt, style=
- Style.FINALS_TONE3 if pre_align_args['use_tone'] else Style.FINALS)
- assert len(shengmu) == len(yunmu)
- for i in range(len(shengmu)):
- if shengmu[i] == '' and yunmu[i] == '':
-                print(f"Found a character whose initial and final are both empty: {txt[i]}")
- ph_list = []
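-        # merge each initial/final pair into a single phoneme token; identical pairs (punctuation, 'ENG') are kept once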
- for a, b in zip(shengmu, yunmu):
- if a == b:
- ph_list += [a]
- else:
- ph_list += [a + "%" + b]
- seg_list = '#'.join(jieba.cut(txt))
- assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list)
-
-        # insert word boundaries: '#' between jieba-segmented words, '|' between characters within a word
- ph_list_ = []
- seg_idx = 0
- for p in ph_list:
- if seg_list[seg_idx] == '#':
- ph_list_.append('#')
- seg_idx += 1
-            elif len(ph_list_) > 0:
-                ph_list_.append("|")
-            seg_idx += 1
-            ph_list_ += [x for x in p.split("%") if x != '']
-
- ph_list = ph_list_
-
-        # drop word-boundary markers that sit next to silence symbols, e.g. [..., '#', ',', '#', ...]
- sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes()
- ph_list_ = []
-        for i in range(len(ph_list)):
- if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes):
- ph_list_.append(ph_list[i])
- ph_list = ph_list_
-
- txt_struct = [[w, []] for w in txt]
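-        # attach each phoneme to its character slot; '|'/'#' boundaries and punctuation advance the character index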
- i = 0
- for ph in ph_list:
- if ph == '|' or ph == '#':
- i += 1
- continue
- # elif ph in [',', '.']:
- elif ph in [',', '.', '?', '!', ':']:
- i += 1
- txt_struct[i][1].append(ph)
- i += 1
- continue
- txt_struct[i][1].append(ph)
- # return ph_list, txt
- txt_struct.insert(0, ['', ['']])
- txt_struct.append(['', ['']])
- return txt_struct, txt
-
-
-if __name__ == '__main__':
- # t = 'simon演唱过后,simon还进行了simon精彩的文艺演出simon.'
- t = '你当我傻啊?脑子那么大怎么塞进去???'
- phs, txt = TxtProcessor.process(t, {'use_tone': True})
- print(phs, txt)
diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py
deleted file mode 100644
index 5fc15bf9cfe0111a910e7de33d04ffdec3877576..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py
+++ /dev/null
@@ -1,641 +0,0 @@
-"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-from functools import partial
-from inspect import isfunction
-from collections import namedtuple
-from einops import rearrange, repeat, reduce
-
-# constants
-
-DEFAULT_DIM_HEAD = 64
-
-Intermediates = namedtuple('Intermediates', [
- 'pre_softmax_attn',
- 'post_softmax_attn'
-])
-
-LayerIntermediates = namedtuple('LayerIntermediates', [
- 'hiddens',
- 'attn_intermediates'
-])
-
-
-class AbsolutePositionalEmbedding(nn.Module):
- def __init__(self, dim, max_seq_len):
- super().__init__()
- self.emb = nn.Embedding(max_seq_len, dim)
- self.init_()
-
- def init_(self):
- nn.init.normal_(self.emb.weight, std=0.02)
-
- def forward(self, x):
- n = torch.arange(x.shape[1], device=x.device)
- return self.emb(n)[None, :, :]
-
-
-class FixedPositionalEmbedding(nn.Module):
- def __init__(self, dim):
- super().__init__()
- inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
- self.register_buffer('inv_freq', inv_freq)
-
- def forward(self, x, seq_dim=1, offset=0):
- t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
- sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
- emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
- return emb[None, :, :]
-
-
-# helpers
-
-def exists(val):
- return val is not None
-
-
-def default(val, d):
- if exists(val):
- return val
- return d() if isfunction(d) else d
-
-
-def always(val):
- def inner(*args, **kwargs):
- return val
- return inner
-
-
-def not_equals(val):
- def inner(x):
- return x != val
- return inner
-
-
-def equals(val):
- def inner(x):
- return x == val
- return inner
-
-
-def max_neg_value(tensor):
- return -torch.finfo(tensor.dtype).max
-
-
-# keyword argument helpers
-
-def pick_and_pop(keys, d):
- values = list(map(lambda key: d.pop(key), keys))
- return dict(zip(keys, values))
-
-
-def group_dict_by_key(cond, d):
- return_val = [dict(), dict()]
- for key in d.keys():
- match = bool(cond(key))
- ind = int(not match)
- return_val[ind][key] = d[key]
- return (*return_val,)
-
-
-def string_begins_with(prefix, str):
- return str.startswith(prefix)
-
-
-def group_by_key_prefix(prefix, d):
- return group_dict_by_key(partial(string_begins_with, prefix), d)
-
-
-def groupby_prefix_and_trim(prefix, d):
- kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
- kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
- return kwargs_without_prefix, kwargs
-
-
-# classes
-class Scale(nn.Module):
- def __init__(self, value, fn):
- super().__init__()
- self.value = value
- self.fn = fn
-
- def forward(self, x, **kwargs):
- x, *rest = self.fn(x, **kwargs)
- return (x * self.value, *rest)
-
-
-class Rezero(nn.Module):
- def __init__(self, fn):
- super().__init__()
- self.fn = fn
- self.g = nn.Parameter(torch.zeros(1))
-
- def forward(self, x, **kwargs):
- x, *rest = self.fn(x, **kwargs)
- return (x * self.g, *rest)
-
-
-class ScaleNorm(nn.Module):
- def __init__(self, dim, eps=1e-5):
- super().__init__()
- self.scale = dim ** -0.5
- self.eps = eps
- self.g = nn.Parameter(torch.ones(1))
-
- def forward(self, x):
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
- return x / norm.clamp(min=self.eps) * self.g
-
-
-class RMSNorm(nn.Module):
- def __init__(self, dim, eps=1e-8):
- super().__init__()
- self.scale = dim ** -0.5
- self.eps = eps
- self.g = nn.Parameter(torch.ones(dim))
-
- def forward(self, x):
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
- return x / norm.clamp(min=self.eps) * self.g
-
-
-class Residual(nn.Module):
- def forward(self, x, residual):
- return x + residual
-
-
-class GRUGating(nn.Module):
- def __init__(self, dim):
- super().__init__()
- self.gru = nn.GRUCell(dim, dim)
-
- def forward(self, x, residual):
- gated_output = self.gru(
- rearrange(x, 'b n d -> (b n) d'),
- rearrange(residual, 'b n d -> (b n) d')
- )
-
- return gated_output.reshape_as(x)
-
-
-# feedforward
-
-class GEGLU(nn.Module):
- def __init__(self, dim_in, dim_out):
- super().__init__()
- self.proj = nn.Linear(dim_in, dim_out * 2)
-
- def forward(self, x):
- x, gate = self.proj(x).chunk(2, dim=-1)
- return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
- def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
- super().__init__()
- inner_dim = int(dim * mult)
- dim_out = default(dim_out, dim)
- project_in = nn.Sequential(
- nn.Linear(dim, inner_dim),
- nn.GELU()
- ) if not glu else GEGLU(dim, inner_dim)
-
- self.net = nn.Sequential(
- project_in,
- nn.Dropout(dropout),
- nn.Linear(inner_dim, dim_out)
- )
-
- def forward(self, x):
- return self.net(x)
-
-
-# attention.
-class Attention(nn.Module):
- def __init__(
- self,
- dim,
- dim_head=DEFAULT_DIM_HEAD,
- heads=8,
- causal=False,
- mask=None,
- talking_heads=False,
- sparse_topk=None,
- use_entmax15=False,
- num_mem_kv=0,
- dropout=0.,
- on_attn=False
- ):
- super().__init__()
- if use_entmax15:
- raise NotImplementedError("Check out entmax activation instead of softmax activation!")
- self.scale = dim_head ** -0.5
- self.heads = heads
- self.causal = causal
- self.mask = mask
-
- inner_dim = dim_head * heads
-
- self.to_q = nn.Linear(dim, inner_dim, bias=False)
- self.to_k = nn.Linear(dim, inner_dim, bias=False)
- self.to_v = nn.Linear(dim, inner_dim, bias=False)
- self.dropout = nn.Dropout(dropout)
-
- # talking heads
- self.talking_heads = talking_heads
- if talking_heads:
- self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
- self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-
- # explicit topk sparse attention
- self.sparse_topk = sparse_topk
-
- # entmax
- #self.attn_fn = entmax15 if use_entmax15 else F.softmax
- self.attn_fn = F.softmax
-
- # add memory key / values
- self.num_mem_kv = num_mem_kv
- if num_mem_kv > 0:
- self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
- self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-
- # attention on attention
- self.attn_on_attn = on_attn
- self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
-
- def forward(
- self,
- x,
- context=None,
- mask=None,
- context_mask=None,
- rel_pos=None,
- sinusoidal_emb=None,
- prev_attn=None,
- mem=None
- ):
- b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
- kv_input = default(context, x)
-
- q_input = x
- k_input = kv_input
- v_input = kv_input
-
- if exists(mem):
- k_input = torch.cat((mem, k_input), dim=-2)
- v_input = torch.cat((mem, v_input), dim=-2)
-
- if exists(sinusoidal_emb):
- # in shortformer, the query would start at a position offset depending on the past cached memory
- offset = k_input.shape[-2] - q_input.shape[-2]
- q_input = q_input + sinusoidal_emb(q_input, offset=offset)
- k_input = k_input + sinusoidal_emb(k_input)
-
- q = self.to_q(q_input)
- k = self.to_k(k_input)
- v = self.to_v(v_input)
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
-
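-        # combine the query and key padding masks into one attention mask, defaulting to all-True when a mask is missing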
- input_mask = None
- if any(map(exists, (mask, context_mask))):
- q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
- k_mask = q_mask if not exists(context) else context_mask
- k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
- q_mask = rearrange(q_mask, 'b i -> b () i ()')
- k_mask = rearrange(k_mask, 'b j -> b () () j')
- input_mask = q_mask * k_mask
-
- if self.num_mem_kv > 0:
- mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
- k = torch.cat((mem_k, k), dim=-2)
- v = torch.cat((mem_v, v), dim=-2)
- if exists(input_mask):
- input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
-
- dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
- mask_value = max_neg_value(dots)
-
- if exists(prev_attn):
- dots = dots + prev_attn
-
- pre_softmax_attn = dots
-
- if talking_heads:
- dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
-
- if exists(rel_pos):
- dots = rel_pos(dots)
-
- if exists(input_mask):
- dots.masked_fill_(~input_mask, mask_value)
- del input_mask
-
- if self.causal:
- i, j = dots.shape[-2:]
- r = torch.arange(i, device=device)
- mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
- mask = F.pad(mask, (j - i, 0), value=False)
- dots.masked_fill_(mask, mask_value)
- del mask
-
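-        # explicit sparse attention: keep only the top-k logits per query and mask out the rest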
- if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
- top, _ = dots.topk(self.sparse_topk, dim=-1)
- vk = top[..., -1].unsqueeze(-1).expand_as(dots)
- mask = dots < vk
- dots.masked_fill_(mask, mask_value)
- del mask
-
- attn = self.attn_fn(dots, dim=-1)
- post_softmax_attn = attn
-
- attn = self.dropout(attn)
-
- if talking_heads:
- attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
-
- out = einsum('b h i j, b h j d -> b h i d', attn, v)
- out = rearrange(out, 'b h n d -> b n (h d)')
-
- intermediates = Intermediates(
- pre_softmax_attn=pre_softmax_attn,
- post_softmax_attn=post_softmax_attn
- )
-
- return self.to_out(out), intermediates
-
-
-class AttentionLayers(nn.Module):
- def __init__(
- self,
- dim,
- depth,
- heads=8,
- causal=False,
- cross_attend=False,
- only_cross=False,
- use_scalenorm=False,
- use_rmsnorm=False,
- use_rezero=False,
- rel_pos_num_buckets=32,
- rel_pos_max_distance=128,
- position_infused_attn=False,
- custom_layers=None,
- sandwich_coef=None,
- par_ratio=None,
- residual_attn=False,
- cross_residual_attn=False,
- macaron=False,
- pre_norm=True,
- gate_residual=False,
- **kwargs
- ):
- super().__init__()
- ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
- attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
-
- dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
-
- self.dim = dim
- self.depth = depth
- self.layers = nn.ModuleList([])
-
- self.has_pos_emb = position_infused_attn
- self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
- self.rotary_pos_emb = always(None)
-
- assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
- self.rel_pos = None
-
- self.pre_norm = pre_norm
-
- self.residual_attn = residual_attn
- self.cross_residual_attn = cross_residual_attn
-
- norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
- norm_class = RMSNorm if use_rmsnorm else norm_class
- norm_fn = partial(norm_class, dim)
-
- norm_fn = nn.Identity if use_rezero else norm_fn
- branch_fn = Rezero if use_rezero else None
-
- if cross_attend and not only_cross:
- default_block = ('a', 'c', 'f')
- elif cross_attend and only_cross:
- default_block = ('c', 'f')
- else:
- default_block = ('a', 'f')
-
- if macaron:
- default_block = ('f',) + default_block
-
- if exists(custom_layers):
- layer_types = custom_layers
- elif exists(par_ratio):
- par_depth = depth * len(default_block)
- assert 1 < par_ratio <= par_depth, 'par ratio out of range'
- default_block = tuple(filter(not_equals('f'), default_block))
- par_attn = par_depth // par_ratio
- depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
- par_width = (depth_cut + depth_cut // par_attn) // par_attn
- assert len(default_block) <= par_width, 'default block is too large for par_ratio'
- par_block = default_block + ('f',) * (par_width - len(default_block))
- par_head = par_block * par_attn
- layer_types = par_head + ('f',) * (par_depth - len(par_head))
- elif exists(sandwich_coef):
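-            # sandwich: attention-only layers at the start, feedforward-only layers at the end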
- assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
- layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
- else:
- layer_types = default_block * depth
-
- self.layer_types = layer_types
- self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
-
- for layer_type in self.layer_types:
- if layer_type == 'a':
- layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
- elif layer_type == 'c':
- layer = Attention(dim, heads=heads, **attn_kwargs)
- elif layer_type == 'f':
- layer = FeedForward(dim, **ff_kwargs)
- layer = layer if not macaron else Scale(0.5, layer)
- else:
- raise Exception(f'invalid layer type {layer_type}')
-
- if isinstance(layer, Attention) and exists(branch_fn):
- layer = branch_fn(layer)
-
- if gate_residual:
- residual_fn = GRUGating(dim)
- else:
- residual_fn = Residual()
-
- self.layers.append(nn.ModuleList([
- norm_fn(),
- layer,
- residual_fn
- ]))
-
- def forward(
- self,
- x,
- context=None,
- mask=None,
- context_mask=None,
- mems=None,
- return_hiddens=False
- ):
- hiddens = []
- intermediates = []
- prev_attn = None
- prev_cross_attn = None
-
- mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
-
- for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
- is_last = ind == (len(self.layers) - 1)
-
- if layer_type == 'a':
- hiddens.append(x)
- layer_mem = mems.pop(0)
-
- residual = x
-
- if self.pre_norm:
- x = norm(x)
-
- if layer_type == 'a':
- out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
- prev_attn=prev_attn, mem=layer_mem)
- elif layer_type == 'c':
- out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
- elif layer_type == 'f':
- out = block(x)
-
- x = residual_fn(out, residual)
-
- if layer_type in ('a', 'c'):
- intermediates.append(inter)
-
- if layer_type == 'a' and self.residual_attn:
- prev_attn = inter.pre_softmax_attn
- elif layer_type == 'c' and self.cross_residual_attn:
- prev_cross_attn = inter.pre_softmax_attn
-
- if not self.pre_norm and not is_last:
- x = norm(x)
-
- if return_hiddens:
- intermediates = LayerIntermediates(
- hiddens=hiddens,
- attn_intermediates=intermediates
- )
-
- return x, intermediates
-
- return x
-
-
-class Encoder(AttentionLayers):
- def __init__(self, **kwargs):
- assert 'causal' not in kwargs, 'cannot set causality on encoder'
- super().__init__(causal=False, **kwargs)
-
-
-
-class TransformerWrapper(nn.Module):
- def __init__(
- self,
- *,
- num_tokens,
- max_seq_len,
- attn_layers,
- emb_dim=None,
- max_mem_len=0.,
- emb_dropout=0.,
- num_memory_tokens=None,
- tie_embedding=False,
- use_pos_emb=True
- ):
- super().__init__()
- assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
-
- dim = attn_layers.dim
- emb_dim = default(emb_dim, dim)
-
- self.max_seq_len = max_seq_len
- self.max_mem_len = max_mem_len
- self.num_tokens = num_tokens
-
- self.token_emb = nn.Embedding(num_tokens, emb_dim)
- self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
- use_pos_emb and not attn_layers.has_pos_emb) else always(0)
- self.emb_dropout = nn.Dropout(emb_dropout)
-
- self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
- self.attn_layers = attn_layers
- self.norm = nn.LayerNorm(dim)
-
- self.init_()
-
- self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
-
- # memory tokens (like [cls]) from Memory Transformers paper
- num_memory_tokens = default(num_memory_tokens, 0)
- self.num_memory_tokens = num_memory_tokens
- if num_memory_tokens > 0:
- self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
-
- # let funnel encoder know number of memory tokens, if specified
- if hasattr(attn_layers, 'num_memory_tokens'):
- attn_layers.num_memory_tokens = num_memory_tokens
-
- def init_(self):
- nn.init.normal_(self.token_emb.weight, std=0.02)
-
- def forward(
- self,
- x,
- return_embeddings=False,
- mask=None,
- return_mems=False,
- return_attn=False,
- mems=None,
- **kwargs
- ):
- b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
- x = self.token_emb(x)
- x += self.pos_emb(x)
- x = self.emb_dropout(x)
-
- x = self.project_emb(x)
-
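-        # prepend learned memory tokens (similar to [CLS]) and pad the mask so they are always attended to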
- if num_mem > 0:
- mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
- x = torch.cat((mem, x), dim=1)
-
- # auto-handle masking after appending memory tokens
- if exists(mask):
- mask = F.pad(mask, (num_mem, 0), value=True)
-
- x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
- x = self.norm(x)
-
- mem, x = x[:, :num_mem], x[:, num_mem:]
-
- out = self.to_logits(x) if not return_embeddings else x
-
- if return_mems:
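-            # concatenate new hidden states onto any existing memories, keep only the last max_mem_len positions, and detach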
- hiddens = intermediates.hiddens
- new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
- new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
- return out, new_mems
-
- if return_attn:
- attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
- return out, attn_maps
-
- return out
-
diff --git a/spaces/AIGText/GlyphControl/annotator/render_images.py b/spaces/AIGText/GlyphControl/annotator/render_images.py
deleted file mode 100644
index 2028212504c526fb3e042177b7bbe4c6a4f8e287..0000000000000000000000000000000000000000
--- a/spaces/AIGText/GlyphControl/annotator/render_images.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from PIL import Image, ImageFont, ImageDraw
-import random
-
-# resize height to image_height first, then shrink or pad to image_width
-def resize_and_pad_image(pil_image, image_size):
-
- if isinstance(image_size, (tuple, list)) and len(image_size) == 2:
- image_width, image_height = image_size
- elif isinstance(image_size, int):
- image_width = image_height = image_size
- else:
- raise ValueError(f"Image size should be int or list/tuple of int not {image_size}")
-
- while pil_image.size[1] >= 2 * image_height:
- pil_image = pil_image.resize(
- tuple(x // 2 for x in pil_image.size), resample=Image.BOX
- )
-
- scale = image_height / pil_image.size[1]
- pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size),resample=Image.BICUBIC)
-
- # shrink
- if pil_image.size[0] > image_width:
- pil_image = pil_image.resize((image_width, image_height),resample=Image.BICUBIC)
-
- # padding
- if pil_image.size[0] < image_width:
- img = Image.new(mode="RGB",size=(image_width,image_height), color="white")
- width, _ = pil_image.size
- img.paste(pil_image,((image_width - width)//2, 0))
- pil_image = img
-
- return pil_image
-
-def render_text_image_custom(image_size, bboxes, rendered_txt_values, num_rows_values, align = "center"):
- # aligns = ["center", "left", "right"]
- """Render text image based on the list of bbox called `bboxes`.
- Support font that can be choosed.
- """
- print(image_size, bboxes, rendered_txt_values, num_rows_values, align)
- background = Image.new("RGB", image_size, "white")
- font = ImageFont.truetype("calibri.ttf", encoding='utf-8', size=512)
-
- for text, bbox, num_rows in zip(rendered_txt_values, bboxes, num_rows_values):
-
- if len(text) == 0:
- continue
-
- text = text.strip()
- if num_rows != 1:
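-            # split the text into at most num_rows lines, sampling random word positions as line breaks when there are more words than rows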
- word_tokens = text.split()
- num_tokens = len(word_tokens)
- index_list = range(1, num_tokens + 1)
- if num_tokens > num_rows:
- index_list = random.sample(index_list, num_rows)
- index_list.sort()
- line_list = []
- start_idx = 0
- for index in index_list:
- line_list.append(
- " ".join(word_tokens
- [start_idx: index]
- )
- )
- start_idx = index
- text = "\n".join(line_list)
-
- if 'ratio' not in bbox or bbox['ratio'] == 0 or bbox['ratio'] < 1e-4:
- image4ratio = Image.new("RGB", (512, 512), "white")
- draw = ImageDraw.Draw(image4ratio)
- _, _ , w, h = draw.textbbox(xy=(0,0),text = text, font=font)
- ratio = w / h
- else:
- ratio = bbox['ratio']
-
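-        # convert the normalised bbox (fractions of the image size) into pixel coordinates; height follows from the width and aspect ratio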
- width = int(bbox['width'] * image_size[1])
- height = int(width / ratio)
- top_left_x = int(bbox['top_left_x'] * image_size[0])
- top_left_y = int(bbox['top_left_y'] * image_size[1])
- yaw = bbox['yaw']
-
- text_image = Image.new("RGB", (512, 512), "white")
- draw = ImageDraw.Draw(text_image)
- x,y,w,h = draw.textbbox(xy=(0,0),text = text, font=font)
- text_image = Image.new("RGB", (w, h), "white")
- draw = ImageDraw.Draw(text_image)
- draw.text((-x/2,-y/2), text, "black", font=font, align=align)
- text_image = resize_and_pad_image(text_image, (width, height))
- text_image = text_image.rotate(angle=-yaw, expand=True, fillcolor="white")
- # image = Image.new("RGB", (w, h), "white")
- # draw = ImageDraw.Draw(image)
-
- background.paste(text_image, (top_left_x, top_left_y))
-
- return background
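A minimal usage sketch for the deleted rendering helpers above, assuming the module is importable as annotator.render_images and that the hard-coded calibri.ttf font is visible to PIL; the bbox keys (width, ratio, top_left_x, top_left_y, yaw) are the ones the function reads:

    from annotator.render_images import render_text_image_custom

    # One glyph box in the upper-left corner of a 512x512 white canvas; ratio=0 lets the
    # function derive the aspect ratio from the rendered text itself.
    bbox = {"width": 0.4, "ratio": 0, "top_left_x": 0.05, "top_left_y": 0.05, "yaw": 0}
    image = render_text_image_custom((512, 512), [bbox], ["Hello world"], [2])
    image.save("rendered.png")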
diff --git a/spaces/Abhilashvj/planogram-compliance/classify/train.py b/spaces/Abhilashvj/planogram-compliance/classify/train.py
deleted file mode 100644
index a9594203469bafd05604f684ac3e546d8a733926..0000000000000000000000000000000000000000
--- a/spaces/Abhilashvj/planogram-compliance/classify/train.py
+++ /dev/null
@@ -1,537 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Train a YOLOv5 classifier model on a classification dataset
-
-Usage - Single-GPU training:
- $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
-
-Usage - Multi-GPU DDP training:
- $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
-
-Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
-YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
-Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-import time
-from copy import deepcopy
-from datetime import datetime
-from pathlib import Path
-
-import torch
-import torch.distributed as dist
-import torch.hub as hub
-import torch.optim.lr_scheduler as lr_scheduler
-import torchvision
-from torch.cuda import amp
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from classify import val as validate
-from models.experimental import attempt_load
-from models.yolo import ClassificationModel, DetectionModel
-from utils.dataloaders import create_classification_dataloader
-from utils.general import (
- DATASETS_DIR,
- LOGGER,
- TQDM_BAR_FORMAT,
- WorkingDirectory,
- check_git_info,
- check_git_status,
- check_requirements,
- colorstr,
- download,
- increment_path,
- init_seeds,
- print_args,
- yaml_save,
-)
-from utils.loggers import GenericLogger
-from utils.plots import imshow_cls
-from utils.torch_utils import (
- ModelEMA,
- model_info,
- reshape_classifier_output,
- select_device,
- smart_DDP,
- smart_optimizer,
- smartCrossEntropyLoss,
- torch_distributed_zero_first,
-)
-
-LOCAL_RANK = int(
- os.getenv("LOCAL_RANK", -1)
-) # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv("RANK", -1))
-WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
-GIT_INFO = check_git_info()
-
-
-def train(opt, device):
- init_seeds(opt.seed + 1 + RANK, deterministic=True)
- save_dir, data, bs, epochs, nw, imgsz, pretrained = (
- opt.save_dir,
- Path(opt.data),
- opt.batch_size,
- opt.epochs,
- min(os.cpu_count() - 1, opt.workers),
- opt.imgsz,
- str(opt.pretrained).lower() == "true",
- )
- cuda = device.type != "cpu"
-
- # Directories
- wdir = save_dir / "weights"
- wdir.mkdir(parents=True, exist_ok=True) # make dir
- last, best = wdir / "last.pt", wdir / "best.pt"
-
- # Save run settings
- yaml_save(save_dir / "opt.yaml", vars(opt))
-
- # Logger
- logger = (
- GenericLogger(opt=opt, console_logger=LOGGER)
- if RANK in {-1, 0}
- else None
- )
-
- # Download Dataset
- with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
- data_dir = data if data.is_dir() else (DATASETS_DIR / data)
- if not data_dir.is_dir():
- LOGGER.info(
- f"\nDataset not found ⚠️, missing path {data_dir}, attempting download..."
- )
- t = time.time()
- if str(data) == "imagenet":
- subprocess.run(
- f"bash {ROOT / 'data/scripts/get_imagenet.sh'}",
- shell=True,
- check=True,
- )
- else:
- url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip"
- download(url, dir=data_dir.parent)
- s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
- LOGGER.info(s)
-
- # Dataloaders
- nc = len(
- [x for x in (data_dir / "train").glob("*") if x.is_dir()]
- ) # number of classes
- trainloader = create_classification_dataloader(
- path=data_dir / "train",
- imgsz=imgsz,
- batch_size=bs // WORLD_SIZE,
- augment=True,
- cache=opt.cache,
- rank=LOCAL_RANK,
- workers=nw,
- )
-
- test_dir = (
- data_dir / "test" if (data_dir / "test").exists() else data_dir / "val"
- ) # data/test or data/val
- if RANK in {-1, 0}:
- testloader = create_classification_dataloader(
- path=test_dir,
- imgsz=imgsz,
- batch_size=bs // WORLD_SIZE * 2,
- augment=False,
- cache=opt.cache,
- rank=-1,
- workers=nw,
- )
-
- # Model
- with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
- if Path(opt.model).is_file() or opt.model.endswith(".pt"):
- model = attempt_load(opt.model, device="cpu", fuse=False)
- elif (
- opt.model in torchvision.models.__dict__
- ): # TorchVision models i.e. resnet50, efficientnet_b0
- model = torchvision.models.__dict__[opt.model](
- weights="IMAGENET1K_V1" if pretrained else None
- )
- else:
- m = hub.list(
- "ultralytics/yolov5"
- ) # + hub.list('pytorch/vision') # models
- raise ModuleNotFoundError(
- f"--model {opt.model} not found. Available models are: \n"
- + "\n".join(m)
- )
- if isinstance(model, DetectionModel):
- LOGGER.warning(
- "WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'"
- )
- model = ClassificationModel(
- model=model, nc=nc, cutoff=opt.cutoff or 10
- ) # convert to classification model
- reshape_classifier_output(model, nc) # update class count
- for m in model.modules():
- if not pretrained and hasattr(m, "reset_parameters"):
- m.reset_parameters()
- if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
- m.p = opt.dropout # set dropout
- for p in model.parameters():
- p.requires_grad = True # for training
- model = model.to(device)
-
- # Info
- if RANK in {-1, 0}:
- model.names = trainloader.dataset.classes # attach class names
- model.transforms = (
- testloader.dataset.torch_transforms
- ) # attach inference transforms
- model_info(model)
- if opt.verbose:
- LOGGER.info(model)
- images, labels = next(iter(trainloader))
- file = imshow_cls(
- images[:25],
- labels[:25],
- names=model.names,
- f=save_dir / "train_images.jpg",
- )
- logger.log_images(file, name="Train Examples")
- logger.log_graph(model, imgsz) # log model
-
- # Optimizer
- optimizer = smart_optimizer(
- model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay
- )
-
- # Scheduler
- lrf = 0.01 # final lr (fraction of lr0)
- # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
- lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
- # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
- # final_div_factor=1 / 25 / lrf)
-
- # EMA
- ema = ModelEMA(model) if RANK in {-1, 0} else None
-
- # DDP mode
- if cuda and RANK != -1:
- model = smart_DDP(model)
-
- # Train
- t0 = time.time()
- criterion = smartCrossEntropyLoss(
- label_smoothing=opt.label_smoothing
- ) # loss function
- best_fitness = 0.0
- scaler = amp.GradScaler(enabled=cuda)
- val = test_dir.stem # 'val' or 'test'
- LOGGER.info(
- f"Image sizes {imgsz} train, {imgsz} test\n"
- f"Using {nw * WORLD_SIZE} dataloader workers\n"
- f"Logging results to {colorstr('bold', save_dir)}\n"
- f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n"
- f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}"
- )
- for epoch in range(epochs): # loop over the dataset multiple times
- tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness
- model.train()
- if RANK != -1:
- trainloader.sampler.set_epoch(epoch)
- pbar = enumerate(trainloader)
- if RANK in {-1, 0}:
- pbar = tqdm(
- enumerate(trainloader),
- total=len(trainloader),
- bar_format=TQDM_BAR_FORMAT,
- )
- for i, (images, labels) in pbar: # progress bar
- images, labels = images.to(device, non_blocking=True), labels.to(
- device
- )
-
- # Forward
- with amp.autocast(enabled=cuda): # stability issues when enabled
- loss = criterion(model(images), labels)
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize
- scaler.unscale_(optimizer) # unscale gradients
- torch.nn.utils.clip_grad_norm_(
- model.parameters(), max_norm=10.0
- ) # clip gradients
- scaler.step(optimizer)
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
-
- if RANK in {-1, 0}:
- # Print
- tloss = (tloss * i + loss.item()) / (
- i + 1
- ) # update mean losses
- mem = "%.3gG" % (
- torch.cuda.memory_reserved() / 1e9
- if torch.cuda.is_available()
- else 0
- ) # (GB)
- pbar.desc = (
- f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}"
- + " " * 36
- )
-
- # Test
- if i == len(pbar) - 1: # last batch
- top1, top5, vloss = validate.run(
- model=ema.ema,
- dataloader=testloader,
- criterion=criterion,
- pbar=pbar,
- ) # test accuracy, loss
- fitness = top1 # define fitness as top1 accuracy
-
- # Scheduler
- scheduler.step()
-
- # Log metrics
- if RANK in {-1, 0}:
- # Best fitness
- if fitness > best_fitness:
- best_fitness = fitness
-
- # Log
- metrics = {
- "train/loss": tloss,
- f"{val}/loss": vloss,
- "metrics/accuracy_top1": top1,
- "metrics/accuracy_top5": top5,
- "lr/0": optimizer.param_groups[0]["lr"],
- } # learning rate
- logger.log_metrics(metrics, epoch)
-
- # Save model
- final_epoch = epoch + 1 == epochs
- if (not opt.nosave) or final_epoch:
- ckpt = {
- "epoch": epoch,
- "best_fitness": best_fitness,
- "model": deepcopy(
- ema.ema
- ).half(), # deepcopy(de_parallel(model)).half(),
- "ema": None, # deepcopy(ema.ema).half(),
- "updates": ema.updates,
- "optimizer": None, # optimizer.state_dict(),
- "opt": vars(opt),
- "git": GIT_INFO, # {remote, branch, commit} if a git repo
- "date": datetime.now().isoformat(),
- }
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fitness:
- torch.save(ckpt, best)
- del ckpt
-
- # Train complete
- if RANK in {-1, 0} and final_epoch:
- LOGGER.info(
- f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)"
- f"\nResults saved to {colorstr('bold', save_dir)}"
- f"\nPredict: python classify/predict.py --weights {best} --source im.jpg"
- f"\nValidate: python classify/val.py --weights {best} --data {data_dir}"
- f"\nExport: python export.py --weights {best} --include onnx"
- f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
- f"\nVisualize: https://netron.app\n"
- )
-
- # Plot examples
- images, labels = (
- x[:25] for x in next(iter(testloader))
- ) # first 25 images and labels
- pred = torch.max(ema.ema(images.to(device)), 1)[1]
- file = imshow_cls(
- images,
- labels,
- pred,
- model.names,
- verbose=False,
- f=save_dir / "test_images.jpg",
- )
-
- # Log results
- meta = {
- "epochs": epochs,
- "top1_acc": best_fitness,
- "date": datetime.now().isoformat(),
- }
- logger.log_images(
- file, name="Test Examples (true-predicted)", epoch=epoch
- )
- logger.log_model(best, epochs, metadata=meta)
-
-
-def parse_opt(known=False):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--model",
- type=str,
- default="yolov5s-cls.pt",
- help="initial weights path",
- )
- parser.add_argument(
- "--data",
- type=str,
- default="imagenette160",
- help="cifar10, cifar100, mnist, imagenet, ...",
- )
- parser.add_argument(
- "--epochs", type=int, default=10, help="total training epochs"
- )
- parser.add_argument(
- "--batch-size",
- type=int,
- default=64,
- help="total batch size for all GPUs",
- )
- parser.add_argument(
- "--imgsz",
- "--img",
- "--img-size",
- type=int,
- default=224,
- help="train, val image size (pixels)",
- )
- parser.add_argument(
- "--nosave", action="store_true", help="only save final checkpoint"
- )
- parser.add_argument(
- "--cache",
- type=str,
- nargs="?",
- const="ram",
- help='--cache images in "ram" (default) or "disk"',
- )
- parser.add_argument(
- "--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
- )
- parser.add_argument(
- "--workers",
- type=int,
- default=8,
- help="max dataloader workers (per RANK in DDP mode)",
- )
- parser.add_argument(
- "--project",
- default=ROOT / "runs/train-cls",
- help="save to project/name",
- )
- parser.add_argument("--name", default="exp", help="save to project/name")
- parser.add_argument(
- "--exist-ok",
- action="store_true",
- help="existing project/name ok, do not increment",
- )
- parser.add_argument(
- "--pretrained",
- nargs="?",
- const=True,
- default=True,
- help="start from i.e. --pretrained False",
- )
- parser.add_argument(
- "--optimizer",
- choices=["SGD", "Adam", "AdamW", "RMSProp"],
- default="Adam",
- help="optimizer",
- )
- parser.add_argument(
- "--lr0", type=float, default=0.001, help="initial learning rate"
- )
- parser.add_argument(
- "--decay", type=float, default=5e-5, help="weight decay"
- )
- parser.add_argument(
- "--label-smoothing",
- type=float,
- default=0.1,
- help="Label smoothing epsilon",
- )
- parser.add_argument(
- "--cutoff",
- type=int,
- default=None,
- help="Model layer cutoff index for Classify() head",
- )
- parser.add_argument(
- "--dropout", type=float, default=None, help="Dropout (fraction)"
- )
- parser.add_argument("--verbose", action="store_true", help="Verbose mode")
- parser.add_argument(
- "--seed", type=int, default=0, help="Global training seed"
- )
- parser.add_argument(
- "--local_rank",
- type=int,
- default=-1,
- help="Automatic DDP Multi-GPU argument, do not modify",
- )
- return parser.parse_known_args()[0] if known else parser.parse_args()
-
-
-def main(opt):
- # Checks
- if RANK in {-1, 0}:
- print_args(vars(opt))
- check_git_status()
- check_requirements()
-
- # DDP mode
- device = select_device(opt.device, batch_size=opt.batch_size)
- if LOCAL_RANK != -1:
- assert (
- opt.batch_size != -1
- ), "AutoBatch is coming soon for classification, please pass a valid --batch-size"
- assert (
- opt.batch_size % WORLD_SIZE == 0
- ), f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
- assert (
- torch.cuda.device_count() > LOCAL_RANK
- ), "insufficient CUDA devices for DDP command"
- torch.cuda.set_device(LOCAL_RANK)
- device = torch.device("cuda", LOCAL_RANK)
- dist.init_process_group(
- backend="nccl" if dist.is_nccl_available() else "gloo"
- )
-
- # Parameters
- opt.save_dir = increment_path(
- Path(opt.project) / opt.name, exist_ok=opt.exist_ok
- ) # increment run
-
- # Train
- train(opt, device)
-
-
-def run(**kwargs):
- # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
- opt = parse_opt(True)
- for k, v in kwargs.items():
- setattr(opt, k, v)
- main(opt)
- return opt
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
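A minimal programmatic sketch mirroring the run() helper above, assuming the repository is importable as the yolov5 package and the imagenette160 dataset can be fetched:

    from yolov5 import classify

    opt = classify.train.run(data="imagenette160", model="yolov5s-cls.pt", epochs=5, imgsz=224)
    print(opt.save_dir)  # incremented runs/train-cls/exp* directory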
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py
deleted file mode 100644
index 794274f26a417b41ba487bcd113741c0bc61072e..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from __future__ import annotations
-
-import json
-from abc import ABC, abstractmethod
-
-import requests
-
-from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
-
-
-class Equing(BaseProvider):
- url: str = 'https://next.eqing.tech/'
- working = False
- supports_stream = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = False
-
- @staticmethod
- @abstractmethod
- def create_completion(
- model: str,
- messages: list[dict[str, str]],
- stream: bool, **kwargs: Any) -> CreateResult:
-
- headers = {
- 'authority' : 'next.eqing.tech',
- 'accept' : 'text/event-stream',
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control' : 'no-cache',
- 'content-type' : 'application/json',
- 'origin' : 'https://next.eqing.tech',
- 'plugins' : '0',
- 'pragma' : 'no-cache',
- 'referer' : 'https://next.eqing.tech/',
- 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
- 'sec-ch-ua-mobile' : '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest' : 'empty',
- 'sec-fetch-mode' : 'cors',
- 'sec-fetch-site' : 'same-origin',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
- 'usesearch' : 'false',
- 'x-requested-with' : 'XMLHttpRequest'
- }
-
- json_data = {
- 'messages' : messages,
- 'stream' : stream,
- 'model' : model,
- 'temperature' : kwargs.get('temperature', 0.5),
- 'presence_penalty' : kwargs.get('presence_penalty', 0),
- 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
- 'top_p' : kwargs.get('top_p', 1),
- }
-
- response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
- headers=headers, json=json_data, stream=stream)
-
- if not stream:
- yield response.json()["choices"][0]["message"]["content"]
- return
-
- for line in response.iter_content(chunk_size=1024):
- if line:
- if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- token = line_json['choices'][0]['delta'].get('content')
- if token:
- yield token
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
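A minimal consumption sketch for the deleted provider above; it is marked working = False, so this only illustrates the generator-style call shape rather than a live endpoint, and the import path is assumed from the file location:

    from g4f.Provider.deprecated.Equing import Equing

    messages = [{"role": "user", "content": "Hello"}]
    for token in Equing.create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
        print(token, end="", flush=True)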
diff --git a/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py b/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py
deleted file mode 100644
index 735d7aa0ebe7dbf3c4b062ebc3858cb5f9ebab40..0000000000000000000000000000000000000000
--- a/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py
+++ /dev/null
@@ -1,272 +0,0 @@
-import numpy as np
-import random
-import torch
-import torch.nn as nn
-
-from models.common import Conv, DWConv
-from utils.google_utils import attempt_download
-
-
-class CrossConv(nn.Module):
- # Cross Convolution Downsample
- def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
- # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
- super(CrossConv, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, (1, k), (1, s))
- self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
- self.add = shortcut and c1 == c2
-
- def forward(self, x):
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class Sum(nn.Module):
- # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
- def __init__(self, n, weight=False): # n: number of inputs
- super(Sum, self).__init__()
- self.weight = weight # apply weights boolean
- self.iter = range(n - 1) # iter object
- if weight:
- self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
-
- def forward(self, x):
- y = x[0] # no weight
- if self.weight:
- w = torch.sigmoid(self.w) * 2
- for i in self.iter:
- y = y + x[i + 1] * w[i]
- else:
- for i in self.iter:
- y = y + x[i + 1]
- return y
-
-
-class MixConv2d(nn.Module):
- # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
- def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
- super(MixConv2d, self).__init__()
- groups = len(k)
- if equal_ch: # equal c_ per group
- i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
- c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
- else: # equal weight.numel() per group
- b = [c2] + [0] * groups
- a = np.eye(groups + 1, groups, k=-1)
- a -= np.roll(a, 1, axis=1)
- a *= np.array(k) ** 2
- a[0] = 1
- c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
-
- self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
- self.bn = nn.BatchNorm2d(c2)
- self.act = nn.LeakyReLU(0.1, inplace=True)
-
- def forward(self, x):
- return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
-class Ensemble(nn.ModuleList):
- # Ensemble of models
- def __init__(self):
- super(Ensemble, self).__init__()
-
- def forward(self, x, augment=False):
- y = []
- for module in self:
- y.append(module(x, augment)[0])
- # y = torch.stack(y).max(0)[0] # max ensemble
- # y = torch.stack(y).mean(0) # mean ensemble
- y = torch.cat(y, 1) # nms ensemble
- return y, None # inference, train output
-
-
-
-
-
-class ORT_NMS(torch.autograd.Function):
- '''ONNX-Runtime NMS operation'''
- @staticmethod
- def forward(ctx,
- boxes,
- scores,
- max_output_boxes_per_class=torch.tensor([100]),
- iou_threshold=torch.tensor([0.45]),
- score_threshold=torch.tensor([0.25])):
- device = boxes.device
- batch = scores.shape[0]
- num_det = random.randint(0, 100)
- batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device)
- idxs = torch.arange(100, 100 + num_det).to(device)
- zeros = torch.zeros((num_det,), dtype=torch.int64).to(device)
- selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous()
- selected_indices = selected_indices.to(torch.int64)
- return selected_indices
-
- @staticmethod
- def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold):
- return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
-
-
-class TRT_NMS(torch.autograd.Function):
- '''TensorRT NMS operation'''
- @staticmethod
- def forward(
- ctx,
- boxes,
- scores,
- background_class=-1,
- box_coding=1,
- iou_threshold=0.45,
- max_output_boxes=100,
- plugin_version="1",
- score_activation=0,
- score_threshold=0.25,
- ):
- batch_size, num_boxes, num_classes = scores.shape
- num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
- det_boxes = torch.randn(batch_size, max_output_boxes, 4)
- det_scores = torch.randn(batch_size, max_output_boxes)
- det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
- return num_det, det_boxes, det_scores, det_classes
-
- @staticmethod
- def symbolic(g,
- boxes,
- scores,
- background_class=-1,
- box_coding=1,
- iou_threshold=0.45,
- max_output_boxes=100,
- plugin_version="1",
- score_activation=0,
- score_threshold=0.25):
- out = g.op("TRT::EfficientNMS_TRT",
- boxes,
- scores,
- background_class_i=background_class,
- box_coding_i=box_coding,
- iou_threshold_f=iou_threshold,
- max_output_boxes_i=max_output_boxes,
- plugin_version_s=plugin_version,
- score_activation_i=score_activation,
- score_threshold_f=score_threshold,
- outputs=4)
- nums, boxes, scores, classes = out
- return nums, boxes, scores, classes
-
-
-class ONNX_ORT(nn.Module):
- '''onnx module with ONNX-Runtime NMS operation.'''
- def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80):
- super().__init__()
- self.device = device if device else torch.device("cpu")
- self.max_obj = torch.tensor([max_obj]).to(device)
- self.iou_threshold = torch.tensor([iou_thres]).to(device)
- self.score_threshold = torch.tensor([score_thres]).to(device)
- self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic
- self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]],
- dtype=torch.float32,
- device=self.device)
- self.n_classes=n_classes
-
- def forward(self, x):
- boxes = x[:, :, :4]
- conf = x[:, :, 4:5]
- scores = x[:, :, 5:]
- if self.n_classes == 1:
- scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
-                          # so there is no need to multiply.
- else:
- scores *= conf # conf = obj_conf * cls_conf
- boxes @= self.convert_matrix
- max_score, category_id = scores.max(2, keepdim=True)
- dis = category_id.float() * self.max_wh
- nmsbox = boxes + dis
- max_score_tp = max_score.transpose(1, 2).contiguous()
- selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold)
- X, Y = selected_indices[:, 0], selected_indices[:, 2]
- selected_boxes = boxes[X, Y, :]
- selected_categories = category_id[X, Y, :].float()
- selected_scores = max_score[X, Y, :]
- X = X.unsqueeze(1).float()
- return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1)
-
-class ONNX_TRT(nn.Module):
- '''onnx module with TensorRT NMS operation.'''
- def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80):
- super().__init__()
- assert max_wh is None
- self.device = device if device else torch.device('cpu')
- self.background_class = -1,
- self.box_coding = 1,
- self.iou_threshold = iou_thres
- self.max_obj = max_obj
- self.plugin_version = '1'
- self.score_activation = 0
- self.score_threshold = score_thres
- self.n_classes=n_classes
-
- def forward(self, x):
- boxes = x[:, :, :4]
- conf = x[:, :, 4:5]
- scores = x[:, :, 5:]
- if self.n_classes == 1:
- scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
-                          # so there is no need to multiply.
- else:
- scores *= conf # conf = obj_conf * cls_conf
- num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding,
- self.iou_threshold, self.max_obj,
- self.plugin_version, self.score_activation,
- self.score_threshold)
- return num_det, det_boxes, det_scores, det_classes
-
-
-class End2End(nn.Module):
- '''export onnx or tensorrt model with NMS operation.'''
- def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80):
- super().__init__()
- device = device if device else torch.device('cpu')
- assert isinstance(max_wh,(int)) or max_wh is None
- self.model = model.to(device)
- self.model.model[-1].end2end = True
- self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT
- self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes)
- self.end2end.eval()
-
- def forward(self, x):
- x = self.model(x)
- x = self.end2end(x)
- return x
-
-
-
-
-
-def attempt_load(weights, map_location=None):
- # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
- model = Ensemble()
- for w in weights if isinstance(weights, list) else [weights]:
- attempt_download(w)
- ckpt = torch.load(w, map_location=map_location) # load
- model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
-
- # Compatibility updates
- for m in model.modules():
- if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
- m.inplace = True # pytorch 1.7.0 compatibility
- elif type(m) is nn.Upsample:
- m.recompute_scale_factor = None # torch 1.11.0 compatibility
- elif type(m) is Conv:
- m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
-
- if len(model) == 1:
- return model[-1] # return model
- else:
- print('Ensemble created with %s\n' % weights)
- for k in ['names', 'stride']:
- setattr(model, k, getattr(model[-1], k))
- return model # return ensemble
-
-
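The deleted helpers above are normally combined to load YOLOv7 weights and wrap them with an in-graph NMS head before export; a minimal sketch, assuming a local yolov7.pt checkpoint whose Detect head supports the end2end flag:

    import torch
    from models.experimental import attempt_load, End2End

    model = attempt_load("yolov7.pt", map_location="cpu")   # single model or ensemble
    wrapped = End2End(model, max_obj=100, iou_thres=0.45, score_thres=0.25,
                      max_wh=None)                           # max_wh=None selects the TensorRT NMS path
    dummy = torch.zeros(1, 3, 640, 640)
    torch.onnx.export(wrapped, dummy, "yolov7-end2end.onnx", opset_version=12)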
diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py
deleted file mode 100644
index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000
--- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import re
-import opencc
-
-
-dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
- 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
- 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
- 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
- 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
- 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
-
-converters = {}
-
-for dialect in dialects.values():
- try:
- converters[dialect] = opencc.OpenCC(dialect)
-    except Exception:
- pass
-
-
-def ngu_dialect_to_ipa(text, dialect):
- dialect = dialects[dialect]
- text = converters[dialect].convert(text).replace('-','').replace('$',' ')
- text = re.sub(r'[、;:]', ',', text)
- text = re.sub(r'\s*,\s*', ', ', text)
- text = re.sub(r'\s*。\s*', '. ', text)
- text = re.sub(r'\s*?\s*', '? ', text)
- text = re.sub(r'\s*!\s*', '! ', text)
- text = re.sub(r'\s*$', '', text)
- return text
diff --git a/spaces/AlexWang/lama/bin/predict_inner_features.py b/spaces/AlexWang/lama/bin/predict_inner_features.py
deleted file mode 100644
index 4f9f7a11a6c4757a4eaa05cf1ac648d372f7e02f..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/bin/predict_inner_features.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python3
-
-# Example command:
-# ./bin/predict.py \
-# model.path= \
-# indir= \
-# outdir=
-
-import logging
-import os
-import sys
-import traceback
-
-from saicinpainting.evaluation.utils import move_to_device
-
-os.environ['OMP_NUM_THREADS'] = '1'
-os.environ['OPENBLAS_NUM_THREADS'] = '1'
-os.environ['MKL_NUM_THREADS'] = '1'
-os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
-os.environ['NUMEXPR_NUM_THREADS'] = '1'
-
-import cv2
-import hydra
-import numpy as np
-import torch
-import tqdm
-import yaml
-from omegaconf import OmegaConf
-from torch.utils.data._utils.collate import default_collate
-
-from saicinpainting.training.data.datasets import make_default_val_dataset
-from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule
-from saicinpainting.utils import register_debug_signal_handlers, get_shape
-
-LOGGER = logging.getLogger(__name__)
-
-
-@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml')
-def main(predict_config: OmegaConf):
- try:
- register_debug_signal_handlers() # kill -10 will result in traceback dumped into log
-
- device = torch.device(predict_config.device)
-
- train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
- with open(train_config_path, 'r') as f:
- train_config = OmegaConf.create(yaml.safe_load(f))
-
- checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint)
- model = load_checkpoint(train_config, checkpoint_path, strict=False)
- model.freeze()
- model.to(device)
-
- assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported'
- assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential)
-
- if not predict_config.indir.endswith('/'):
- predict_config.indir += '/'
-
- dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)
-
- max_level = max(predict_config.levels)
-
- with torch.no_grad():
- for img_i in tqdm.trange(len(dataset)):
- mask_fname = dataset.mask_filenames[img_i]
- cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0])
- os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)
-
- batch = move_to_device(default_collate([dataset[img_i]]), device)
-
- img = batch['image']
- mask = batch['mask']
- mask[:] = 0
- mask_h, mask_w = mask.shape[-2:]
- mask[:, :,
- mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius,
- mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1
-
- masked_img = torch.cat([img * (1 - mask), mask], dim=1)
-
- feats = masked_img
- for level_i, level in enumerate(model.generator.model):
- feats = level(feats)
- if level_i in predict_config.levels:
- cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \
- if isinstance(feats, tuple) else feats
-
- if predict_config.slice_channels:
- cur_feats = cur_feats[:, slice(*predict_config.slice_channels)]
-
- cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone()
- cur_feat -= cur_feat.min()
- cur_feat /= cur_feat.std()
- cur_feat = cur_feat.clamp(0, 1) / 1
- cur_feat = cur_feat.cpu().numpy()[0]
- cur_feat *= 255
- cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
- cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat)
-
- # for channel_i in predict_config.channels:
- #
- # cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy()
- # cur_feat -= cur_feat.min()
- # cur_feat /= cur_feat.max()
- # cur_feat *= 255
- # cur_feat = np.clip(cur_feat, 0, 255).astype('uint8')
- # cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat)
- elif level_i >= max_level:
- break
- except KeyboardInterrupt:
- LOGGER.warning('Interrupted by user')
- except Exception as ex:
- LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
- sys.exit(1)
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/Ame42/rwms/local_utils.py b/spaces/Ame42/rwms/local_utils.py
deleted file mode 100644
index 4de37df85c5c9aae3a1d8f786631e40cee878e5e..0000000000000000000000000000000000000000
--- a/spaces/Ame42/rwms/local_utils.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import math
-import re
-import numpy
-import pandas
-from sklearn.ensemble import RandomForestRegressor
-from sklearn.tree import export_graphviz
-import pickle as pkl
-
-l2 = "2L"
-l1 = "1L"
-s2 = "2S"
-s1 = "1S"
-date_time_col = "Date Time (GMT+01)"
-time_col = "Time (GMT+01)"
-dur_col = "Daylight duration (SEC)"
-date_col = "Date"
-id_col = "id"
-well_col = "Well index"
-blind_col = "THP BLIND (PSI)"
-temp_col = "TEMP (°F)"
-flp_col = "FLP (PSI)"
-ro_col = "THP R/O (PSI)"
-man_col = "Manifold Pressure (PSI)"
-sim_col = f'Predicted {ro_col}'
-ql_col = 'Liquid production (BBL/D)'
-out_folder = "output/"
-well_key = "wellhead"
-flow_key = "flowstation"
-
-model_file = "rf-AWNW"
-scaler_file = "ss-AWNW"
-
-day_mode = '22-11-2020'
-all_mode = 'All'
-train_mode = 'Train'
-test_mode = 'Test'
-
-
-def round_to_n(x, n):
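-    # Round x to n significant figures. A trailing 5 is nudged up by 1 first so it rounds
-    # up instead of to the nearest even digit, and n is reduced by one for single-digit
-    # inputs, so seconds such as 7 round to the nearest multiple of 10.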
- x = x if x % 10 != 5 else x + 1
- n = n if x > 9 else n - 1
- return x if x == 0 else round(x, -int(math.floor(math.log10(abs(x)))) + (n - 1))
-
-
-def to_sec(h, m, s):
- return (int(h) * 60 * 60) + (int(m) * 60) + int(s)
-
-
-def from_sec(t):
- return f"{t // (60 * 60):0>2}:{(t % (60 * 60)) // 60:0>2}:{(t % (60 * 60)) % 60:0>2}"
-
-
-def column_matcher(title):
- if re.search("#", string=title) is not None:
- found = id_col
- elif re.search(".*(Date|DATE).*(Time|TIME).*GMT.*", string=title) is not None:
- found = date_time_col
- elif re.search("THP.*R/O.*(PSI|units)", string=title) is not None:
- found = ro_col
- elif re.search(".*TEMP.*(F|units)", string=title) is not None:
- found = temp_col
- elif re.search(".*FLP.*(PSI|units)", string=title) is not None:
- found = flp_col
- elif re.search("THP.*BLIND.*(PSI|units)", string=title) is not None:
- found = blind_col
- elif re.search("THP.*(PSI|units)", string=title) is not None:
- found = blind_col
- elif re.search(".*1S.*PSI.*", string=title) is not None:
- found = s1
- elif re.search(".*2S.*PSI.*", string=title) is not None:
- found = s2
- elif re.search(".*1L.*PSI.*", string=title) is not None:
- found = l1
- elif re.search(".*2L.*PSI.*", string=title) is not None:
- found = l2
- else:
- found = False
-
- return found
-
-
-def file_matcher(name: str):
- if re.search("\\d+-\\d+-\\d+.*flow.*man.*", string=name.lower()) is not None:
- flowstation = True
- else:
- flowstation = False
-
- return flowstation
-
-
-def file_matcher2(name: str):
- if re.search(".*1s.*", string=name.lower()) is not None:
- well = s1
- elif re.search(".*1l.*", string=name.lower()) is not None:
- well = l1
- elif re.search(".*2s.*", string=name.lower()) is not None:
- well = s2
- else:
- well = l2
-
- return well
-
-
-def restructure(data, count, duration, times, dates):
- for datetime in data[date_time_col]:
- try:
- date_time = re.sub("\\.0(?=\\s)", "", datetime)
- datetime_array = date_time.split()
- date = datetime_array[0].split("/")
-
- time_array = datetime_array[1].split(":")
-
- if datetime_array[2] == "PM" and time_array[0] != "12":
- hour = int(time_array[0]) + 12
- elif datetime_array[2] == "AM" and time_array[0] == "12":
- hour = int(time_array[0]) - 12
- else:
- hour = time_array[0]
-
- minutes = time_array[1]
- sec = round_to_n(int(time_array[2]), 1)
-
- if sec == 60:
- sec = "00"
- minutes = int(minutes) + 1
-
- if minutes == 60:
- minutes = "00"
- hour = int(hour) + 1
-
- if hour == 24:
- hour = "00"
- date[1] = int(date[1]) + 1
-
- duration.append(to_sec(hour, minutes, sec))
- times.append(f"{hour}:{minutes}:{sec}")
- dates.append(f"{date[1]}/{date[0]}/{date[2]}")
- date_time = f"{date[1]}/{date[0]}/{date[2]} {datetime_array[1]} {datetime_array[2]}"
-
- data.loc[count, date_time_col] = date_time
- count += 1
- except IndexError:
- print(f"\n\n{datetime}", flush=True)
- raise
-
- data.insert(1, dur_col, numpy.array(duration), True)
- data.insert(2, time_col, numpy.array(times), True)
- data.insert(3, date_col, numpy.array(dates), True)
- return data.drop(axis=1, columns="index", errors='ignore')
-
-
-def try_key(temp, key):
- try:
- temp[f"{key}"]
- except KeyError:
- temp[f"{key}"] = dict()
-
-
-def find_data(index, wlhd):
- for w in wlhd:
- if index == w[0]:
- return w[1]
-
- return None
-
-
-def split_join(flowstation: pandas.DataFrame, wellhead: pandas.DataFrame, offset):
- joined = []
- info = [s1, l1, s2, l2]
- for i, o in zip(info, offset):
- # print(f'\n\nNow working on {i} column\n')
- data = flowstation.drop(flowstation.columns.difference([i, 'Daylight duration (SEC)']),
- axis=1)
- data.rename(columns={i: man_col}, inplace=True)
- data.insert(2, well_col, [i for _ in range(data.shape[0])], True)
-
- # print(f"{data.shape[0]} rows before drop and merge")
- data_well = find_data(i, wellhead)
- if data_well is not None:
- data_well.drop_duplicates(inplace=True, subset=[time_col])
- data = data.merge(data_well, how='inner', on=[dur_col])
-
- # print(f"{data.shape[0]} rows after drop and merge")
- # offset the rows by the required amount 'o'
- data_y = data.drop(data.columns.difference([ro_col, id_col]), axis=1, errors="ignore").iloc[o:]
- data_x = data.drop(columns=[ro_col], axis=1, errors="ignore").iloc[:(data.shape[0] - 1 - o)]
- data_y.reset_index(inplace=True)
- data_x.reset_index(inplace=True)
- data_y.drop(columns=["index"], axis=1, inplace=True)
- data_x.drop(columns=["index"], axis=1, inplace=True)
- data = data_y.merge(data_x, how='inner', on=[id_col])
- joined.append((i, data))
-
- return joined
-
-
-class WellDataPoint:
-
- def __init__(self, thp, day_sec, man_pres, temp, _l1=0, _s1=1, _l2=0, _s2=0):
- self.thp = thp
- self.day_sec = day_sec
- self.man_pres = man_pres
- self.temp = temp
- self.l1 = _l1
- self.s1 = _s1
- self.l2 = _l2
- self.s2 = _s2
-
- def __str__(self):
- day_sec, deli, i, man_pres, temp, well, well_titles = self.fields()
- return f"""\033[1;31mTesting data\033[0m
-{day_sec:>20}{deli:3}{self.day_sec} seconds
-{man_pres:>20}{deli:3}{self.man_pres} psi
-{temp:>20}{deli:3}{self.temp} °F
-{well:>20}{deli:3}{well_titles[i]}
-"""
-
- def fields(self):
- deli = ' '
- day_sec = "Day duration:"
- man_pres = "Manifold Pressure:"
- temp = "Temperature:"
- well = "Well Name:"
- wells = [self.l1, self.l2, self.s1, self.s2]
- well_titles = ["Awoba NW 1L", "Awoba NW 2L", "Awoba NW 1S", "Awoba NW 2S"] # List of well titles
- i = 0
- # Find the well with dummy value 1
- while not (wells[i]): # not(0) yields true and not(anything else) yields false
- i += 1
- return day_sec, deli, i, man_pres, temp, well, well_titles
-
- def __plain__(self):
- day_sec, deli, i, man_pres, temp, well, well_titles = self.fields()
- space = '40'
- d_space = '3'
- return f"""Testing data
-{day_sec:>{space}}{deli:{d_space}}{self.day_sec} seconds
-{man_pres:>{space}}{deli:{d_space}}{self.man_pres} psi
-{temp:>{space}}{deli:{d_space}}{self.temp} °F
-{well:>{space}}{deli:{d_space}}{well_titles[i]}
-"""
-
- def __repr__(self):
- return f"Practice([{self.day_sec}, {self.man_pres}, {self.temp}, {self.l1}, {self.s1}, {self.l2}, {self.s2}])"
-
- def get_x(self):
- return [self.day_sec, self.man_pres, self.temp, self.l1, self.s1, self.l2, self.s2]
-
- def get_y(self):
- return self.thp
-
-
-def oversample_balance(data: pandas.DataFrame):
- # get buckets for control column
- data = data.astype(float, errors='ignore')
- mx = data[ro_col].max(axis=0, skipna=True)
- mn = data[ro_col].min(axis=0, skipna=True)
- rng = mx - mn
- bucket = rng / 10
-
- # shuffle data into buckets
- max_count = 0
- counter = mn
- temp = []
- results = []
-
- while counter < mx:
-
- sub_data = data[data[ro_col].between(counter, counter + bucket, inclusive='right')]
- if sub_data.shape[0] > 0 and float(sub_data[ro_col].min(axis=0, skipna=True)) > 0:
- temp.append(sub_data)
-
- max_count = max_count if sub_data.shape[0] < max_count else sub_data.shape[0]
-
- counter += bucket
-
- for r in temp:
- counter = 0
- pumped_data = r
- print(r.shape, "\n", r.head())
- # add elements of r to pumped_data
- while pumped_data.shape[0] < max_count:
- new_row = r.iloc[[counter % r.shape[0]]]
-
- pumped_data = pandas.concat([pumped_data, new_row], ignore_index=True)
-
- # add final results to results series
- results.append(pumped_data)
-
- return pandas.concat(results, ignore_index=True)
-
-
-def parse_well_id(well_id):
- return f"Awoba NW {well_id}"
-
-
-def parse_well_id_2(well_id):
- return f"Abura {well_id}"
-
-
-def print_graph(model: RandomForestRegressor, x):
-    for idx, est in enumerate(model.estimators_):
-        file = f'tree_{idx}.dot'
-        export_graphviz(est, out_file=file, feature_names=x.columns,
- class_names=['extreme', 'moderate', 'vulnerable', 'non-vulnerable'],
- rounded=True, proportion=False, precision=4, filled=True)
-
-
-def write_state_files(model, scaler):
- pkl.dump(model, open(f"{model_file}.mdl", "wb"))
- pkl.dump(scaler, open(f"{scaler_file}.sts", "wb"))
-
-
-def keep_useful_cols(data, columns=None):
- if columns is None:
- columns = [ro_col, dur_col, man_col, well_col, time_col, date_col, blind_col, flp_col, temp_col]
- return data.drop(data.columns.difference(columns), axis=1)
-
-
-def read_state_files(mdl, scl):
- mdl = pkl.load(open(f"{mdl}.mdl", "rb"))
- scl = pkl.load(open(f"{scl}.sts", "rb"))
- return mdl, scl
-
-
-def change_well_to_dummy(wl):
- _l1, _l2, _s1, _s2 = 0, 0, 0, 0
-
- if wl == parse_well_id(l1):
- _l1 = 1
- elif wl == parse_well_id(s1):
- _s1 = 1
- elif wl == parse_well_id(l2):
- _l2 = 1
- elif wl == parse_well_id(s2):
- _s2 = 1
-
- return _l1, _l2, _s1, _s2
-
-
-def calc_excel(pres):
- # from well Abura 2S
- return pres + 624, pres * 31.88
-
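A minimal prediction sketch using the persistence helpers above, assuming the pickled model and scaler (rf-AWNW.mdl, ss-AWNW.sts) exist and that the regressor was fit on features in the order returned by WellDataPoint.get_x():

    from local_utils import WellDataPoint, read_state_files, model_file, scaler_file

    model, scaler = read_state_files(model_file, scaler_file)
    point = WellDataPoint(thp=None, day_sec=43200, man_pres=180, temp=95, _s1=1)
    x = scaler.transform([point.get_x()])
    print("Predicted THP R/O (PSI):", model.predict(x)[0])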
diff --git a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h
deleted file mode 100644
index f88ab5d8cb343f97026966b402eaeed8831e356a..0000000000000000000000000000000000000000
--- a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#pragma once
-
-#include <type_traits>
-
-#include "libipc/def.h"
-#include "libipc/prod_cons.h"
-
-#include "libipc/circ/elem_array.h"
-
-namespace ipc {
-namespace policy {
-
-template <template <typename, std::size_t...> class Elems, typename Flag>
-struct choose;
-
-template <typename Flag>
-struct choose<circ::elem_array, Flag> {
- using flag_t = Flag;
-
-    template <std::size_t DataSize, std::size_t AlignSize>
-    using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
-};
-
-} // namespace policy
-} // namespace ipc
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models.py
deleted file mode 100644
index 936e16ad992fce3faf868d974274b5cd7c6a6be9..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/models.py
+++ /dev/null
@@ -1,770 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py
-
-import math
-import random
-import functools
-import operator
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-import torch.nn.init as init
-from torch.autograd import Function
-
-from .op_edit import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
-
-
-class PixelNorm(nn.Module):
- def __init__(self):
- super().__init__()
-
- def forward(self, input):
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
- k = torch.tensor(k, dtype=torch.float32)
- if k.ndim == 1:
- k = k[None, :] * k[:, None]
- k /= k.sum()
- return k
-
-
-class Upsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel) * (factor ** 2)
- self.register_buffer("kernel", kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=self.factor,
- down=1, pad=self.pad)
- return out
-
-
-class Downsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel)
- self.register_buffer("kernel", kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=1,
- down=self.factor, pad=self.pad)
- return out
-
-
-class Blur(nn.Module):
- def __init__(self, kernel, pad, upsample_factor=1):
- super().__init__()
-
- kernel = make_kernel(kernel)
-
- if upsample_factor > 1:
- kernel = kernel * (upsample_factor ** 2)
-
- self.register_buffer("kernel", kernel)
-
- self.pad = pad
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, pad=self.pad)
- return out
-
-
-class EqualConv2d(nn.Module):
- def __init__(
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
- ):
- super().__init__()
-
- self.weight = nn.Parameter(
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
- )
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
- self.stride = stride
- self.padding = padding
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_channel))
-
- else:
- self.bias = None
-
- def forward(self, input):
- out = F.conv2d(
- input,
- self.weight * self.scale,
- bias=self.bias,
- stride=self.stride,
- padding=self.padding,
- )
- return out
-
- def __repr__(self):
- return (
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
- f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
- )
-
-
-class EqualLinear(nn.Module):
- def __init__(
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
- ):
- super().__init__()
-
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
- else:
- self.bias = None
-
- self.activation = activation
-
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
- self.lr_mul = lr_mul
-
- def forward(self, input):
- if self.activation:
- out = F.linear(input, self.weight * self.scale)
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
- else:
- out = F.linear(
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
- )
- return out
-
- def __repr__(self):
- return (
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
- )
-
-
-class ScaledLeakyReLU(nn.Module):
- def __init__(self, negative_slope=0.2):
- super().__init__()
- self.negative_slope = negative_slope
-
- def forward(self, input):
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
- return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- demodulate=True,
- upsample=False,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- ):
- super().__init__()
-
- self.eps = 1e-8
- self.kernel_size = kernel_size
- self.in_channel = in_channel
- self.out_channel = out_channel
- self.upsample = upsample
- self.downsample = downsample
-
- if upsample:
- factor = 2
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2 + 1
- self.blur = Blur(blur_kernel, pad=(
- pad0, pad1), upsample_factor=factor)
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
- fan_in = in_channel * kernel_size ** 2
- self.scale = 1 / math.sqrt(fan_in)
- self.padding = kernel_size // 2
- self.weight = nn.Parameter(
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
- )
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
- self.demodulate = demodulate
-
- def __repr__(self):
- return (
- f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
- f"upsample={self.upsample}, downsample={self.downsample})"
- )
-
- def forward(self, input, style):
- batch, in_channel, height, width = input.shape
-
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
- weight = self.scale * self.weight * style
-
- if self.demodulate:
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
- weight = weight.view(
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
-
- if self.upsample:
- input = input.view(1, batch * in_channel, height, width)
- weight = weight.view(
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
- weight = weight.transpose(1, 2).reshape(
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
- )
- out = F.conv_transpose2d(
- input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
- out = self.blur(out)
-
- elif self.downsample:
- input = self.blur(input)
- _, _, height, width = input.shape
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- else:
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- return out
-
-
-class NoiseInjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.weight = nn.Parameter(torch.zeros(1))
-
- def forward(self, image, noise=None):
- if noise is None:
- batch, _, height, width = image.shape
- noise = image.new_empty(batch, 1, height, width).normal_()
- return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
- def __init__(self, channel, size=4):
- super().__init__()
- self.input = nn.Parameter(torch.randn(1, channel, size, size // 2))
-
- def forward(self, input):
- batch = input.shape[0]
- out = self.input.repeat(batch, 1, 1, 1)
- return out
-
-
-class StyledConv(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=False,
- blur_kernel=[1, 3, 3, 1],
- demodulate=True,
- ):
- super().__init__()
- self.conv = ModulatedConv2d(
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=upsample,
- blur_kernel=blur_kernel,
- demodulate=demodulate,
- )
- self.noise = NoiseInjection()
- self.activate = FusedLeakyReLU(out_channel)
-
- def forward(self, input, style, noise=None):
- out = self.conv(input, style)
- out = self.noise(out, noise=noise)
- out = self.activate(out)
- return out
-
-
-class ToRGB(nn.Module):
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
- if upsample:
- self.upsample = Upsample(blur_kernel)
-
- self.conv = ModulatedConv2d(
- in_channel, 3, 1, style_dim, demodulate=False)
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
- def forward(self, input, style, skip=None):
- out = self.conv(input, style)
- out = out + self.bias
-
- if skip is not None:
- skip = self.upsample(skip)
- out = out + skip
-
- return out
-
-
-class Generator(nn.Module):
- def __init__(
- self,
- size,
- style_dim,
- n_mlp,
- channel_multiplier=1,
- blur_kernel=[1, 3, 3, 1],
- lr_mlp=0.01,
- small=False,
- small_isaac=False,
- ):
- super().__init__()
-
- self.size = size
-
- if small and size > 64:
- raise ValueError("small only works for sizes <= 64")
-
- self.style_dim = style_dim
- layers = [PixelNorm()]
-
- for i in range(n_mlp):
- layers.append(
- EqualLinear(
- style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
- )
- )
-
- self.style = nn.Sequential(*layers)
-
- if small:
- self.channels = {
- 4: 64 * channel_multiplier,
- 8: 64 * channel_multiplier,
- 16: 64 * channel_multiplier,
- 32: 64 * channel_multiplier,
- 64: 64 * channel_multiplier,
- }
- elif small_isaac:
- self.channels = {4: 256, 8: 256,
- 16: 256, 32: 256, 64: 128, 128: 128}
- else:
- self.channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- self.input = ConstantInput(self.channels[4])
- self.conv1 = StyledConv(
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
- )
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
- self.log_size = int(math.log(size, 2))
- self.num_layers = (self.log_size - 2) * 2 + 1
-
- self.convs = nn.ModuleList()
- self.upsamples = nn.ModuleList()
- self.to_rgbs = nn.ModuleList()
- self.noises = nn.Module()
-
- in_channel = self.channels[4]
-
- for layer_idx in range(self.num_layers):
- res = (layer_idx + 5) // 2
- shape = [1, 1, 2 ** res, 2 ** res // 2]
- self.noises.register_buffer(
- "noise_{}".format(layer_idx), torch.randn(*shape)
- )
-
- for i in range(3, self.log_size + 1):
- out_channel = self.channels[2 ** i]
-
- self.convs.append(
- StyledConv(
- in_channel,
- out_channel,
- 3,
- style_dim,
- upsample=True,
- blur_kernel=blur_kernel,
- )
- )
-
- self.convs.append(
- StyledConv(
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
- )
- )
-
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
- in_channel = out_channel
-
- self.n_latent = self.log_size * 2 - 2
-
- def make_noise(self):
- device = self.input.input.device
-
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2 // 2, device=device)]
-
- for i in range(3, self.log_size + 1):
- for _ in range(2):
- noises.append(torch.randn(
- 1, 1, 2 ** i, 2 ** i // 2, device=device))
-
- return noises
-
- def mean_latent(self, n_latent):
- latent_in = torch.randn(
- n_latent, self.style_dim, device=self.input.input.device
- )
- latent = self.style(latent_in).mean(0, keepdim=True)
-
- return latent
-
- def get_latent(self, input):
- return self.style(input)
-
- def forward(
- self,
- styles,
- return_latents=False,
- return_features=False,
- inject_index=None,
- truncation=1,
- truncation_latent=None,
- input_is_latent=False,
- noise=None,
- randomize_noise=True,
- real=False,
- ):
- if not input_is_latent:
- styles = [self.style(s) for s in styles]
- if noise is None:
- if randomize_noise:
- noise = [None] * self.num_layers
- else:
- noise = [
- getattr(self.noises, "noise_{}".format(i))
- for i in range(self.num_layers)
- ]
-
- if truncation < 1:
- # print('truncation_latent: ', truncation_latent.shape)
- if not real: # if type(styles) == list:
- style_t = []
- for style in styles:
- style_t.append(
- truncation_latent + truncation *
- (style - truncation_latent)
- ) # (-1.1162e-03-(-1.0914e-01))*0.8+(-1.0914e-01)
- styles = style_t
- else: # styles are latent (tensor: 1,18,512), for real PTI output
- truncation_latent = truncation_latent.repeat(
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
- styles = torch.add(truncation_latent, torch.mul(
- torch.sub(styles, truncation_latent), truncation))
- # print('now styles after truncation : ', styles)
- # if type(styles) == list and len(styles) < 2: # this if for input as list of [(1,512)]
- if not real:
- if len(styles) < 2:
- inject_index = self.n_latent
- if styles[0].ndim < 3:
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles[0]
- elif type(styles) == list:
- if inject_index is None:
- inject_index = 4
-
- latent = styles[0].unsqueeze(0)
- if latent.shape[1] == 1:
- latent = latent.repeat(1, inject_index, 1)
- else:
- latent = latent[:, :inject_index, :]
- latent2 = styles[1].unsqueeze(1).repeat(
- 1, self.n_latent - inject_index, 1)
- latent = torch.cat([latent, latent2], 1)
- # input is tensor of size with torch.Size([1, 18, 512]), for real PTI output
- else:
- latent = styles
-
- # print(f'processed latent: {latent.shape}')
-
- features = {}
- out = self.input(latent)
- features["out_0"] = out
- out = self.conv1(out, latent[:, 0], noise=noise[0])
- features["conv1_0"] = out
-
- skip = self.to_rgb1(out, latent[:, 1])
- features["skip_0"] = skip
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
- ):
- out = conv1(out, latent[:, i], noise=noise1)
- features["conv1_{}".format(i)] = out
- out = conv2(out, latent[:, i + 1], noise=noise2)
- features["conv2_{}".format(i)] = out
- skip = to_rgb(out, latent[:, i + 2], skip)
- features["skip_{}".format(i)] = skip
-
- i += 2
-
- image = skip
-
- if return_latents:
- return image, latent
- elif return_features:
- return image, features
- else:
- return image, None
-
-
-class ConvLayer(nn.Sequential):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- bias=True,
- activate=True,
- ):
- layers = []
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
- stride = 2
- self.padding = 0
-
- else:
- stride = 1
- self.padding = kernel_size // 2
-
- layers.append(
- EqualConv2d(
- in_channel,
- out_channel,
- kernel_size,
- padding=self.padding,
- stride=stride,
- bias=bias and not activate,
- )
- )
-
- if activate:
- if bias:
- layers.append(FusedLeakyReLU(out_channel))
- else:
- layers.append(ScaledLeakyReLU(0.2))
-
- super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
- self.skip = ConvLayer(
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
- )
-
- def forward(self, input):
- out = self.conv1(input)
- out = self.conv2(out)
-
- skip = self.skip(input)
- out = (out + skip) / math.sqrt(2)
-
- return out
-
-
-class StyleDiscriminator(nn.Module):
- def __init__(
- self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], small=False
- ):
- super().__init__()
-
- if small:
- channels = {4: 64, 8: 64, 16: 64, 32: 64, 64: 64}
-
- else:
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- convs = [ConvLayer(3, channels[size], 1)]
-
- log_size = int(math.log(size, 2))
- in_channel = channels[size]
-
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
-
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
- in_channel = out_channel
-
- self.convs = nn.Sequential(*convs)
-
- self.stddev_group = 4
- self.stddev_feat = 1
-
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
- self.final_linear = nn.Sequential(
- EqualLinear(channels[4] * 4 * 4, channels[4],
- activation="fused_lrelu"),
- EqualLinear(channels[4], 1),
- )
-
- def forward(self, input):
- h = input
- h_list = []
-
- for index, blocklist in enumerate(self.convs):
- h = blocklist(h)
- h_list.append(h)
-
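- # minibatch standard deviation: append the per-group feature std as an extra channel so the discriminator can sense sample diversity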
- out = h
- batch, channel, height, width = out.shape
- group = min(batch, self.stddev_group)
- stddev = out.view(
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
- )
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
- stddev = stddev.repeat(group, 1, height, width)
- out = torch.cat([out, stddev], 1)
-
- out = self.final_conv(out)
- h_list.append(out)
-
- out = out.view(batch, -1)
- out = self.final_linear(out)
-
- return out, h_list
-
-
-class StyleEncoder(nn.Module):
- def __init__(self, size, w_dim=512):
- super().__init__()
-
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256,
- 128: 128,
- 256: 64,
- 512: 32,
- 1024: 16
- }
-
- self.w_dim = w_dim
- log_size = int(math.log(size, 2))
- convs = [ConvLayer(3, channels[size], 1)]
-
- in_channel = channels[size]
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
- convs.append(ResBlock(in_channel, out_channel))
- in_channel = out_channel
-
- convs.append(EqualConv2d(
- in_channel, 2*self.w_dim, 4, padding=0, bias=False))
-
- self.convs = nn.Sequential(*convs)
-
- def forward(self, input):
- out = self.convs(input)
- # return out.view(len(input), self.n_latents, self.w_dim)
- reshaped = out.view(len(input), 2*self.w_dim)
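- # return the two w_dim halves separately (e.g. as a mean / log-variance style pair)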
- return reshaped[:, :self.w_dim], reshaped[:, self.w_dim:]
-
-
-def kaiming_init(m):
- if isinstance(m, (nn.Linear, nn.Conv2d)):
- init.kaiming_normal_(m.weight)
- if m.bias is not None:
- m.bias.data.fill_(0)
- elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
- m.weight.data.fill_(1)
- if m.bias is not None:
- m.bias.data.fill_(0)
-
-
-def normal_init(m):
- if isinstance(m, (nn.Linear, nn.Conv2d)):
- init.normal_(m.weight, 0, 0.02)
- if m.bias is not None:
- m.bias.data.fill_(0)
- elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
- m.weight.data.fill_(1)
- if m.bias is not None:
- m.bias.data.fill_(0)
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/autoencoderkl.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/autoencoderkl.md
deleted file mode 100644
index bc709c422d36e83c33bfd313b5c8945c9e176150..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/autoencoderkl.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# AutoencoderKL
-
-The variational autoencoder (VAE) model with KL loss was introduced in [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114v11) by Diederik P. Kingma and Max Welling. The model is used in 🤗 Diffusers to encode images into latents and to decode latent representations into images.
-
-The abstract from the paper is:
-
-*How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions are two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.*
-
-## Loading from the original format
-
-By default the [`AutoencoderKL`] should be loaded with [`~ModelMixin.from_pretrained`], but it can also be loaded
-from the original format using [`FromOriginalVAEMixin.from_single_file`] as follows:
-
-```py
-from diffusers import AutoencoderKL
-
-url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file
-model = AutoencoderKL.from_single_file(url)
-```
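-
-Once loaded, the model can round-trip an image through its latent space. The snippet below is a minimal sketch: the random tensor simply stands in for a preprocessed image batch in `[-1, 1]`.
-
-```py
-import torch
-
-# `model` is the AutoencoderKL instance loaded above
-image = torch.randn(1, 3, 512, 512)  # stand-in for a preprocessed image batch
-latents = model.encode(image).latent_dist.sample()  # encode into the latent space
-reconstruction = model.decode(latents).sample  # decode back to pixel space
-```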
-
-## AutoencoderKL
-
-[[autodoc]] AutoencoderKL
-
-## AutoencoderKLOutput
-
-[[autodoc]] models.autoencoder_kl.AutoencoderKLOutput
-
-## DecoderOutput
-
-[[autodoc]] models.vae.DecoderOutput
-
-## FlaxAutoencoderKL
-
-[[autodoc]] FlaxAutoencoderKL
-
-## FlaxAutoencoderKLOutput
-
-[[autodoc]] models.vae_flax.FlaxAutoencoderKLOutput
-
-## FlaxDecoderOutput
-
-[[autodoc]] models.vae_flax.FlaxDecoderOutput
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/cycle_diffusion.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/cycle_diffusion.md
deleted file mode 100644
index 3ff0d768879a5b073c6e987e6e9eb5e5d8fe3742..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/cycle_diffusion.md
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-# Cycle Diffusion
-
-Cycle Diffusion is a text-guided image-to-image generation model proposed in [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://huggingface.co/papers/2210.05559) by Chen Henry Wu, Fernando De la Torre.
-
-The abstract from the paper is:
-
-*Diffusion models have achieved unprecedented performance in generative modeling. The commonly-adopted formulation of the latent code of diffusion models is a sequence of gradually denoised samples, as opposed to the simpler (e.g., Gaussian) latent space of GANs, VAEs, and normalizing flows. This paper provides an alternative, Gaussian formulation of the latent space of various diffusion models, as well as an invertible DPM-Encoder that maps images into the latent space. While our formulation is purely based on the definition of diffusion models, we demonstrate several intriguing consequences. (1) Empirically, we observe that a common latent space emerges from two diffusion models trained independently on related domains. In light of this finding, we propose CycleDiffusion, which uses DPM-Encoder for unpaired image-to-image translation. Furthermore, applying CycleDiffusion to text-to-image diffusion models, we show that large-scale text-to-image diffusion models can be used as zero-shot image-to-image editors. (2) One can guide pre-trained diffusion models and GANs by controlling the latent codes in a unified, plug-and-play formulation based on energy-based models. Using the CLIP model and a face recognition model as guidance, we demonstrate that diffusion models have better coverage of low-density sub-populations and individuals than GANs.*
-
-
-
-Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
-
-
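-Below is a minimal usage sketch. CycleDiffusion relies on the DDIM scheduler; the checkpoint name, the local image path, and the sampling hyperparameters are illustrative assumptions rather than recommended settings:
-
-```py
-import torch
-from PIL import Image
-
-from diffusers import CycleDiffusionPipeline, DDIMScheduler
-
-model_id = "CompVis/stable-diffusion-v1-4"  # example checkpoint
-scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
-pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")
-
-# a hypothetical local source image, resized to the model resolution
-init_image = Image.open("input.png").convert("RGB").resize((512, 512))
-
-# describe the source image and the desired edit
-source_prompt = "An astronaut riding a horse"
-prompt = "An astronaut riding an elephant"
-
-image = pipe(
-    prompt=prompt,
-    source_prompt=source_prompt,
-    image=init_image,
-    num_inference_steps=100,
-    eta=0.1,
-    strength=0.8,
-    guidance_scale=2,
-    source_guidance_scale=1,
-).images[0]
-```
-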
-## CycleDiffusionPipeline
-[[autodoc]] CycleDiffusionPipeline
- - all
- - __call__
-
-## StableDiffusionPipelineOutput
-[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
\ No newline at end of file
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_lms.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_lms.py
deleted file mode 100644
index 1e0a8212354d44ea852bdc64b94550e0d1118750..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_lms.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import torch
-
-from diffusers import LMSDiscreteScheduler
-from diffusers.utils import torch_device
-
-from .test_schedulers import SchedulerCommonTest
-
-
-class LMSDiscreteSchedulerTest(SchedulerCommonTest):
- scheduler_classes = (LMSDiscreteScheduler,)
- num_inference_steps = 10
-
- def get_scheduler_config(self, **kwargs):
- config = {
- "num_train_timesteps": 1100,
- "beta_start": 0.0001,
- "beta_end": 0.02,
- "beta_schedule": "linear",
- }
-
- config.update(**kwargs)
- return config
-
- def test_timesteps(self):
- for timesteps in [10, 50, 100, 1000]:
- self.check_over_configs(num_train_timesteps=timesteps)
-
- def test_betas(self):
- for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
- self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
- def test_schedules(self):
- for schedule in ["linear", "scaled_linear"]:
- self.check_over_configs(beta_schedule=schedule)
-
- def test_prediction_type(self):
- for prediction_type in ["epsilon", "v_prediction"]:
- self.check_over_configs(prediction_type=prediction_type)
-
- def test_time_indices(self):
- for t in [0, 500, 800]:
- self.check_over_forward(time_step=t)
-
- def test_full_loop_no_noise(self):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
-
- for i, t in enumerate(scheduler.timesteps):
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 1006.388) < 1e-2
- assert abs(result_mean.item() - 1.31) < 1e-3
-
- def test_full_loop_with_v_prediction(self):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma
-
- for i, t in enumerate(scheduler.timesteps):
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 0.0017) < 1e-2
- assert abs(result_mean.item() - 2.2676e-06) < 1e-3
-
- def test_full_loop_device(self):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
- sample = sample.to(torch_device)
-
- for i, t in enumerate(scheduler.timesteps):
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 1006.388) < 1e-2
- assert abs(result_mean.item() - 1.31) < 1e-3
-
- def test_full_loop_device_karras_sigmas(self):
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
-
- scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
- sample = sample.to(torch_device)
-
- for t in scheduler.timesteps:
- sample = scheduler.scale_model_input(sample, t)
-
- model_output = model(sample, t)
-
- output = scheduler.step(model_output, t, sample)
- sample = output.prev_sample
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert abs(result_sum.item() - 3812.9927) < 2e-2
- assert abs(result_mean.item() - 4.9648) < 1e-3
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
deleted file mode 100644
index 9212dda4992b4d18cef9a4916b765ef37850237f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_ssd_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_ssd_head.py
deleted file mode 100644
index 90ef3c83ed62d8346c8daef01f18ad7bd236623c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/pisa_ssd_head.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import torch
-
-from mmdet.core import multi_apply
-from ..builder import HEADS
-from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
-from .ssd_head import SSDHead
-
-
-# TODO: add loss evaluator for SSD
-@HEADS.register_module()
-class PISASSDHead(SSDHead):
-
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level.
- Has shape (N, num_anchors * num_classes, H, W)
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_anchors * 4, H, W)
- gt_bboxes (list[Tensor]): Ground truth bboxes of each image
- with shape (num_obj, 4).
- gt_labels (list[Tensor]): Ground truth labels of each image
- with shape (num_obj,).
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
- Default: None.
-
- Returns:
- dict: Loss dict comprising the classification loss, regression loss and
- CARL loss.
- """
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
-
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=1,
- unmap_outputs=False,
- return_sampling_results=True)
- if cls_reg_targets is None:
- return None
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
- num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
-
- num_images = len(img_metas)
- all_cls_scores = torch.cat([
- s.permute(0, 2, 3, 1).reshape(
- num_images, -1, self.cls_out_channels) for s in cls_scores
- ], 1)
- all_labels = torch.cat(labels_list, -1).view(num_images, -1)
- all_label_weights = torch.cat(label_weights_list,
- -1).view(num_images, -1)
- all_bbox_preds = torch.cat([
- b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
- for b in bbox_preds
- ], -2)
- all_bbox_targets = torch.cat(bbox_targets_list,
- -2).view(num_images, -1, 4)
- all_bbox_weights = torch.cat(bbox_weights_list,
- -2).view(num_images, -1, 4)
-
- # concat all level anchors to a single tensor
- all_anchors = []
- for i in range(num_images):
- all_anchors.append(torch.cat(anchor_list[i]))
-
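- # PISA components: ISR-P re-weights positive-sample targets by their importance, and CARL couples the classification score with localisation quality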
- isr_cfg = self.train_cfg.get('isr', None)
- all_targets = (all_labels.view(-1), all_label_weights.view(-1),
- all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4))
- # apply ISR-P
- if isr_cfg is not None:
- all_targets = isr_p(
- all_cls_scores.view(-1, all_cls_scores.size(-1)),
- all_bbox_preds.view(-1, 4),
- all_targets,
- torch.cat(all_anchors),
- sampling_results_list,
- loss_cls=CrossEntropyLoss(),
- bbox_coder=self.bbox_coder,
- **self.train_cfg.isr,
- num_class=self.num_classes)
- (new_labels, new_label_weights, new_bbox_targets,
- new_bbox_weights) = all_targets
- all_labels = new_labels.view(all_labels.shape)
- all_label_weights = new_label_weights.view(all_label_weights.shape)
- all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
- all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
-
- # add CARL loss
- carl_loss_cfg = self.train_cfg.get('carl', None)
- if carl_loss_cfg is not None:
- loss_carl = carl_loss(
- all_cls_scores.view(-1, all_cls_scores.size(-1)),
- all_targets[0],
- all_bbox_preds.view(-1, 4),
- all_targets[2],
- SmoothL1Loss(beta=1.),
- **self.train_cfg.carl,
- avg_factor=num_total_pos,
- num_class=self.num_classes)
-
- # check NaN and Inf
- assert torch.isfinite(all_cls_scores).all().item(), \
- 'classification scores become infinite or NaN!'
- assert torch.isfinite(all_bbox_preds).all().item(), \
- 'bbox predictions become infinite or NaN!'
-
- losses_cls, losses_bbox = multi_apply(
- self.loss_single,
- all_cls_scores,
- all_bbox_preds,
- all_anchors,
- all_labels,
- all_label_weights,
- all_bbox_targets,
- all_bbox_weights,
- num_total_samples=num_total_pos)
- loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
- if carl_loss_cfg is not None:
- loss_dict.update(loss_carl)
- return loss_dict
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py
deleted file mode 100644
index fb2be22f8bc2e10cdfba4f58b2ad1ced913b4ea4..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_40k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './deeplabv3_r50-d8_512x512_40k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2.py
deleted file mode 100644
index 278d394318a18c4af88568087a97736ca85d946d..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/exllamav2.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import random
-from pathlib import Path
-
-import torch
-from exllamav2 import (
- ExLlamaV2,
- ExLlamaV2Cache,
- ExLlamaV2Config,
- ExLlamaV2Tokenizer
-)
-from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler
-
-from modules import shared
-from modules.logging_colors import logger
-from modules.text_generation import get_max_prompt_length
-
-try:
- import flash_attn
-except ModuleNotFoundError:
- logger.warning(
- 'You are running ExLlamaV2 without flash-attention. This will cause the VRAM usage '
- 'to be a lot higher than it could be.\n'
- 'Try installing flash-attention following the instructions here: '
- 'https://github.com/Dao-AILab/flash-attention#installation-and-features'
- )
- pass
-
-
-class Exllamav2Model:
- def __init__(self):
- pass
-
- @classmethod
- def from_pretrained(cls, path_to_model):
-
- path_to_model = Path(f'{shared.args.model_dir}') / Path(path_to_model)
-
- config = ExLlamaV2Config()
- config.model_dir = str(path_to_model)
- config.prepare()
-
- config.max_seq_len = shared.args.max_seq_len
- config.scale_pos_emb = shared.args.compress_pos_emb
- config.scale_alpha_value = shared.args.alpha_value
-
- model = ExLlamaV2(config)
-
- split = None
- if shared.args.gpu_split:
- split = [float(alloc) for alloc in shared.args.gpu_split.split(",")]
-
- model.load(split)
-
- tokenizer = ExLlamaV2Tokenizer(config)
- cache = ExLlamaV2Cache(model)
- generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
-
- result = cls()
- result.model = model
- result.cache = cache
- result.tokenizer = tokenizer
- result.generator = generator
- return result, result
-
- def encode(self, string, **kwargs):
- return self.tokenizer.encode(string, add_bos=True)
-
- def decode(self, ids, **kwargs):
- if isinstance(ids, list):
- ids = torch.tensor([ids])
- elif isinstance(ids, torch.Tensor) and ids.numel() == 1:
- ids = ids.view(1, -1)
-
- return self.tokenizer.decode(ids)[0]
-
- def get_logits(self, token_ids, **kwargs):
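- # prefill the cache with all but the last token, then run the final token alone to get next-token logits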
- self.cache.current_seq_len = 0
- self.model.forward(token_ids[:, :-1], self.cache, input_mask=None, preprocess_only=True)
- return self.model.forward(token_ids[:, -1:], self.cache, input_mask=None, **kwargs).float().cpu()
-
- def generate_with_streaming(self, prompt, state):
- settings = ExLlamaV2Sampler.Settings()
- settings.temperature = state['temperature']
- settings.top_k = state['top_k']
- settings.top_p = state['top_p']
- settings.typical = state['typical_p']
- settings.token_repetition_penalty = state['repetition_penalty']
- settings.token_repetition_range = -1 if state['repetition_penalty_range'] <= 0 else state['repetition_penalty_range']
- if state['ban_eos_token']:
- settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])
-
- if state['custom_token_bans']:
- to_ban = [int(x) for x in state['custom_token_bans'].split(',')]
- if len(to_ban) > 0:
- settings.disallow_tokens(self.tokenizer, to_ban)
-
- ids = self.tokenizer.encode(prompt, add_bos=state['add_bos_token'])
- ids = ids[:, -get_max_prompt_length(state):]
- initial_len = ids.shape[-1]
-
- if state['auto_max_new_tokens']:
- max_new_tokens = state['truncation_length'] - ids.shape[-1]
- else:
- max_new_tokens = state['max_new_tokens']
-
- # _gen_begin_base
- self.cache.current_seq_len = 0
- self.model.forward(ids[:, :-1], self.cache, input_mask=None, preprocess_only=True)
-
- has_leading_space = False
- for i in range(max_new_tokens):
- logits = self.model.forward(ids[:, -1:], self.cache, input_mask=None).float().cpu()
- token, _, _ = ExLlamaV2Sampler.sample(logits, settings, ids, random.random(), self.tokenizer)
- ids = torch.cat([ids, token], dim=1)
-
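- # the SentencePiece tokenizer marks a leading space with '▁'; remember it so the space can be restored after decoding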
- if i == 0 and self.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'):
- has_leading_space = True
-
- decoded_text = self.tokenizer.decode(ids[:, initial_len:])[0]
- if has_leading_space:
- decoded_text = ' ' + decoded_text
-
- yield decoded_text
-
- if token.item() == self.tokenizer.eos_token_id or shared.stop_everything:
- break
-
- def generate(self, prompt, state):
- output = ''
- for output in self.generate_with_streaming(prompt, state):
- pass
-
- return output
diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/unet.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/unet.py
deleted file mode 100644
index 96b46930006b7c9e49948d31568474824195cf8f..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/unet.py
+++ /dev/null
@@ -1,894 +0,0 @@
-from abc import abstractmethod
-
-import math
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .fp16_util import convert_module_to_f16, convert_module_to_f32
-from .nn import (
- checkpoint,
- conv_nd,
- linear,
- avg_pool_nd,
- zero_module,
- normalization,
- timestep_embedding,
-)
-
-
-class AttentionPool2d(nn.Module):
- """
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
- """
-
- def __init__(
- self,
- spacial_dim: int,
- embed_dim: int,
- num_heads_channels: int,
- output_dim: int = None,
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(
- th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
- )
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
- self.num_heads = embed_dim // num_heads_channels
- self.attention = QKVAttention(self.num_heads)
-
- def forward(self, x):
- b, c, *_spatial = x.shape
- x = x.reshape(b, c, -1) # NC(HW)
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
- x = self.qkv_proj(x)
- x = self.attention(x)
- x = self.c_proj(x)
- return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=1
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
-
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
-
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
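- # scale-shift ("FiLM"-like) conditioning splits the embedding into (scale, shift) and modulates the normalized features; otherwise the embedding is simply added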
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
-
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- # split qkv before split heads
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(self._forward, (x,), self.parameters(), True)
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1)
- qkv = self.qkv(self.norm(x))
- h = self.attention(qkv)
- h = self.proj_out(h)
- return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial ** 2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
- """
- A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
-
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
- return a.reshape(bs, -1, length)
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
-
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
- :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- ch = input_ch = int(channel_mult[0] * model_channels)
- self.input_blocks = nn.ModuleList(
- [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
- )
- self._feature_size = ch
- input_block_chans = [ch]
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=int(mult * model_channels),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(mult * model_channels)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(
- ch + ich,
- time_embed_dim,
- dropout,
- out_channels=int(model_channels * mult),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(model_channels * mult)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
- )
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps, y=None):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param y: an [N] Tensor of labels, if class-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- assert (y is not None) == (
- self.num_classes is not None
- ), "must specify y if and only if the model is class-conditional"
-
- hs = []
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- hs.append(h)
- h = self.middle_block(h, emb)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb)
- h = h.type(x.dtype)
- return self.out(h)
-
-
-class SuperResModel(UNetModel):
- """
- A UNetModel that performs super-resolution.
-
- Expects an extra kwarg `low_res` to condition on a low-resolution image.
- """
-
- def __init__(self, image_size, in_channels, *args, **kwargs):
- super().__init__(image_size, in_channels * 2, *args, **kwargs)
-
- def forward(self, x, timesteps, low_res=None, **kwargs):
- _, _, new_height, new_width = x.shape
- upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
- x = th.cat([x, upsampled], dim=1)
- return super().forward(x, timesteps, **kwargs)
-
-
-class EncoderUNetModel(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
-
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- pool="adaptive",
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- ch = int(channel_mult[0] * model_channels)
- self.input_blocks = nn.ModuleList(
- [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
- )
- self._feature_size = ch
- input_block_chans = [ch]
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=int(mult * model_channels),
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = int(mult * model_channels)
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
- self.pool = pool
- if pool == "adaptive":
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- nn.AdaptiveAvgPool2d((1, 1)),
- zero_module(conv_nd(dims, ch, out_channels, 1)),
- nn.Flatten(),
- )
- elif pool == "attention":
- assert num_head_channels != -1
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- AttentionPool2d(
- (image_size // ds), ch, num_head_channels, out_channels
- ),
- )
- elif pool == "spatial":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- nn.ReLU(),
- nn.Linear(2048, self.out_channels),
- )
- elif pool == "spatial_v2":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- normalization(2048),
- nn.SiLU(),
- nn.Linear(2048, self.out_channels),
- )
- else:
- raise NotImplementedError(f"Unexpected {pool} pooling")
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
-
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :return: an [N x K] Tensor of outputs.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- results = []
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = self.middle_block(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = th.cat(results, axis=-1)
- return self.out(h)
- else:
- h = h.type(x.dtype)
- return self.out(h)
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_win32_console.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_win32_console.py
deleted file mode 100644
index 81b1082905338a74b72b9de432ece50a456687bc..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_win32_console.py
+++ /dev/null
@@ -1,662 +0,0 @@
-"""Light wrapper around the Win32 Console API - this module should only be imported on Windows
-
-The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions
-"""
-import ctypes
-import sys
-from typing import Any
-
-windll: Any = None
-if sys.platform == "win32":
- windll = ctypes.LibraryLoader(ctypes.WinDLL)
-else:
- raise ImportError(f"{__name__} can only be imported on Windows")
-
-import time
-from ctypes import Structure, byref, wintypes
-from typing import IO, NamedTuple, Type, cast
-
-from pip._vendor.rich.color import ColorSystem
-from pip._vendor.rich.style import Style
-
-STDOUT = -11
-ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
-
-COORD = wintypes._COORD
-
-
-class LegacyWindowsError(Exception):
- pass
-
-
-class WindowsCoordinates(NamedTuple):
- """Coordinates in the Windows Console API are (y, x), not (x, y).
- This class is intended to prevent that confusion.
- Rows and columns are indexed from 0.
- This class can be used in place of wintypes._COORD in arguments and argtypes.
- """
-
- row: int
- col: int
-
- @classmethod
- def from_param(cls, value: "WindowsCoordinates") -> COORD:
- """Converts a WindowsCoordinates into a wintypes _COORD structure.
- This classmethod is internally called by ctypes to perform the conversion.
-
- Args:
- value (WindowsCoordinates): The input coordinates to convert.
-
- Returns:
- wintypes._COORD: The converted coordinates struct.
- """
- return COORD(value.col, value.row)
-
-
-class CONSOLE_SCREEN_BUFFER_INFO(Structure):
- _fields_ = [
- ("dwSize", COORD),
- ("dwCursorPosition", COORD),
- ("wAttributes", wintypes.WORD),
- ("srWindow", wintypes.SMALL_RECT),
- ("dwMaximumWindowSize", COORD),
- ]
-
-
-class CONSOLE_CURSOR_INFO(ctypes.Structure):
- _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)]
-
-
-_GetStdHandle = windll.kernel32.GetStdHandle
-_GetStdHandle.argtypes = [
- wintypes.DWORD,
-]
-_GetStdHandle.restype = wintypes.HANDLE
-
-
-def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
- """Retrieves a handle to the specified standard device (standard input, standard output, or standard error).
-
- Args:
- handle (int): Integer identifier for the handle. Defaults to -11 (stdout).
-
- Returns:
- wintypes.HANDLE: The handle
- """
- return cast(wintypes.HANDLE, _GetStdHandle(handle))
-
-
-_GetConsoleMode = windll.kernel32.GetConsoleMode
-_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
-_GetConsoleMode.restype = wintypes.BOOL
-
-
-def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
- """Retrieves the current input mode of a console's input buffer
- or the current output mode of a console screen buffer.
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
-
- Raises:
- LegacyWindowsError: If any error occurs while calling the Windows console API.
-
- Returns:
- int: Value representing the current console mode as documented at
- https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters
- """
-
- console_mode = wintypes.DWORD()
- success = bool(_GetConsoleMode(std_handle, console_mode))
- if not success:
- raise LegacyWindowsError("Unable to get legacy Windows Console Mode")
- return console_mode.value
-
-
-_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW
-_FillConsoleOutputCharacterW.argtypes = [
- wintypes.HANDLE,
- ctypes.c_char,
- wintypes.DWORD,
- cast(Type[COORD], WindowsCoordinates),
- ctypes.POINTER(wintypes.DWORD),
-]
-_FillConsoleOutputCharacterW.restype = wintypes.BOOL
-
-
-def FillConsoleOutputCharacter(
- std_handle: wintypes.HANDLE,
- char: str,
- length: int,
- start: WindowsCoordinates,
-) -> int:
- """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates.
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- char (str): The character to write. Must be a string of length 1.
- length (int): The number of times to write the character.
- start (WindowsCoordinates): The coordinates to start writing at.
-
- Returns:
- int: The number of characters written.
- """
- character = ctypes.c_char(char.encode())
- num_characters = wintypes.DWORD(length)
- num_written = wintypes.DWORD(0)
- _FillConsoleOutputCharacterW(
- std_handle,
- character,
- num_characters,
- start,
- byref(num_written),
- )
- return num_written.value
-
-
-_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
-_FillConsoleOutputAttribute.argtypes = [
- wintypes.HANDLE,
- wintypes.WORD,
- wintypes.DWORD,
- cast(Type[COORD], WindowsCoordinates),
- ctypes.POINTER(wintypes.DWORD),
-]
-_FillConsoleOutputAttribute.restype = wintypes.BOOL
-
-
-def FillConsoleOutputAttribute(
- std_handle: wintypes.HANDLE,
- attributes: int,
- length: int,
- start: WindowsCoordinates,
-) -> int:
- """Sets the character attributes for a specified number of character cells,
- beginning at the specified coordinates in a screen buffer.
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- attributes (int): Integer value representing the foreground and background colours of the cells.
- length (int): The number of cells to set the output attribute of.
- start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set.
-
- Returns:
- int: The number of cells whose attributes were actually set.
- """
- num_cells = wintypes.DWORD(length)
- style_attrs = wintypes.WORD(attributes)
- num_written = wintypes.DWORD(0)
- _FillConsoleOutputAttribute(
- std_handle, style_attrs, num_cells, start, byref(num_written)
- )
- return num_written.value
-
-
-_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
-_SetConsoleTextAttribute.argtypes = [
- wintypes.HANDLE,
- wintypes.WORD,
-]
-_SetConsoleTextAttribute.restype = wintypes.BOOL
-
-
-def SetConsoleTextAttribute(
- std_handle: wintypes.HANDLE, attributes: wintypes.WORD
-) -> bool:
- """Set the colour attributes for all text written after this function is called.
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- attributes (int): Integer value representing the foreground and background colours.
-
-
- Returns:
- bool: True if the attribute was set successfully, otherwise False.
- """
- return bool(_SetConsoleTextAttribute(std_handle, attributes))
-
-
-_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
-_GetConsoleScreenBufferInfo.argtypes = [
- wintypes.HANDLE,
- ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO),
-]
-_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
-
-
-def GetConsoleScreenBufferInfo(
- std_handle: wintypes.HANDLE,
-) -> CONSOLE_SCREEN_BUFFER_INFO:
- """Retrieves information about the specified console screen buffer.
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
-
- Returns:
-        CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct containing information about
-            screen size, cursor position, colour attributes, and more."""
- console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
- _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
- return console_screen_buffer_info
-
-
-_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
-_SetConsoleCursorPosition.argtypes = [
- wintypes.HANDLE,
- cast(Type[COORD], WindowsCoordinates),
-]
-_SetConsoleCursorPosition.restype = wintypes.BOOL
-
-
-def SetConsoleCursorPosition(
- std_handle: wintypes.HANDLE, coords: WindowsCoordinates
-) -> bool:
- """Set the position of the cursor in the console screen
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- coords (WindowsCoordinates): The coordinates to move the cursor to.
-
- Returns:
- bool: True if the function succeeds, otherwise False.
- """
- return bool(_SetConsoleCursorPosition(std_handle, coords))
-
-
-_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
-_GetConsoleCursorInfo.argtypes = [
- wintypes.HANDLE,
- ctypes.POINTER(CONSOLE_CURSOR_INFO),
-]
-_GetConsoleCursorInfo.restype = wintypes.BOOL
-
-
-def GetConsoleCursorInfo(
- std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
-) -> bool:
- """Get the cursor info - used to get cursor visibility and width
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
- about the console's cursor.
-
- Returns:
- bool: True if the function succeeds, otherwise False.
- """
- return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
-
-
-_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
-_SetConsoleCursorInfo.argtypes = [
- wintypes.HANDLE,
- ctypes.POINTER(CONSOLE_CURSOR_INFO),
-]
-_SetConsoleCursorInfo.restype = wintypes.BOOL
-
-
-def SetConsoleCursorInfo(
- std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
-) -> bool:
- """Set the cursor info - used for adjusting cursor visibility and width
-
- Args:
- std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
- cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info.
-
- Returns:
- bool: True if the function succeeds, otherwise False.
- """
- return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
-
-
-_SetConsoleTitle = windll.kernel32.SetConsoleTitleW
-_SetConsoleTitle.argtypes = [wintypes.LPCWSTR]
-_SetConsoleTitle.restype = wintypes.BOOL
-
-
-def SetConsoleTitle(title: str) -> bool:
- """Sets the title of the current console window
-
- Args:
- title (str): The new title of the console window.
-
- Returns:
- bool: True if the function succeeds, otherwise False.
- """
- return bool(_SetConsoleTitle(title))
-
-
-class LegacyWindowsTerm:
-    """This class allows interaction with the legacy Windows Console API. It should only be used in
-    environments where virtual terminal processing is not available. In any Windows environment, however,
-    the entire API should still work.
-
- Args:
- file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
- """
-
- BRIGHT_BIT = 8
-
- # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
- ANSI_TO_WINDOWS = [
- 0, # black The Windows colours are defined in wincon.h as follows:
- 4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
- 2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
- 6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
- 1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
- 5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
- 3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
- 7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
- 8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
- 12, # bright red
- 10, # bright green
- 14, # bright yellow
- 9, # bright blue
- 13, # bright magenta
- 11, # bright cyan
- 15, # bright white
- ]
-
- def __init__(self, file: "IO[str]") -> None:
- handle = GetStdHandle(STDOUT)
- self._handle = handle
- default_text = GetConsoleScreenBufferInfo(handle).wAttributes
- self._default_text = default_text
-
- self._default_fore = default_text & 7
- self._default_back = (default_text >> 4) & 7
- self._default_attrs = self._default_fore | (self._default_back << 4)
-
- self._file = file
- self.write = file.write
- self.flush = file.flush
-
- @property
- def cursor_position(self) -> WindowsCoordinates:
- """Returns the current position of the cursor (0-based)
-
- Returns:
- WindowsCoordinates: The current cursor position.
- """
- coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
- return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X))
-
- @property
- def screen_size(self) -> WindowsCoordinates:
- """Returns the current size of the console screen buffer, in character columns and rows
-
- Returns:
- WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
- """
- screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
- return WindowsCoordinates(
- row=cast(int, screen_size.Y), col=cast(int, screen_size.X)
- )
-
- def write_text(self, text: str) -> None:
- """Write text directly to the terminal without any modification of styles
-
- Args:
- text (str): The text to write to the console
- """
- self.write(text)
- self.flush()
-
- def write_styled(self, text: str, style: Style) -> None:
- """Write styled text to the terminal.
-
- Args:
- text (str): The text to write
- style (Style): The style of the text
- """
- color = style.color
- bgcolor = style.bgcolor
- if style.reverse:
- color, bgcolor = bgcolor, color
-
- if color:
- fore = color.downgrade(ColorSystem.WINDOWS).number
- fore = fore if fore is not None else 7 # Default to ANSI 7: White
- if style.bold:
- fore = fore | self.BRIGHT_BIT
- if style.dim:
- fore = fore & ~self.BRIGHT_BIT
- fore = self.ANSI_TO_WINDOWS[fore]
- else:
- fore = self._default_fore
-
- if bgcolor:
- back = bgcolor.downgrade(ColorSystem.WINDOWS).number
- back = back if back is not None else 0 # Default to ANSI 0: Black
- back = self.ANSI_TO_WINDOWS[back]
- else:
- back = self._default_back
-
- assert fore is not None
- assert back is not None
-
- SetConsoleTextAttribute(
- self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
- )
- self.write_text(text)
- SetConsoleTextAttribute(self._handle, attributes=self._default_text)
-
- def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
- """Set the position of the cursor
-
- Args:
- new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
- """
- if new_position.col < 0 or new_position.row < 0:
- return
- SetConsoleCursorPosition(self._handle, coords=new_position)
-
- def erase_line(self) -> None:
- """Erase all content on the line the cursor is currently located at"""
- screen_size = self.screen_size
- cursor_position = self.cursor_position
- cells_to_erase = screen_size.col
- start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
- FillConsoleOutputCharacter(
- self._handle, " ", length=cells_to_erase, start=start_coordinates
- )
- FillConsoleOutputAttribute(
- self._handle,
- self._default_attrs,
- length=cells_to_erase,
- start=start_coordinates,
- )
-
- def erase_end_of_line(self) -> None:
- """Erase all content from the cursor position to the end of that line"""
- cursor_position = self.cursor_position
- cells_to_erase = self.screen_size.col - cursor_position.col
- FillConsoleOutputCharacter(
- self._handle, " ", length=cells_to_erase, start=cursor_position
- )
- FillConsoleOutputAttribute(
- self._handle,
- self._default_attrs,
- length=cells_to_erase,
- start=cursor_position,
- )
-
- def erase_start_of_line(self) -> None:
- """Erase all content from the cursor position to the start of that line"""
- row, col = self.cursor_position
- start = WindowsCoordinates(row, 0)
- FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
- FillConsoleOutputAttribute(
- self._handle, self._default_attrs, length=col, start=start
- )
-
- def move_cursor_up(self) -> None:
- """Move the cursor up a single cell"""
- cursor_position = self.cursor_position
- SetConsoleCursorPosition(
- self._handle,
- coords=WindowsCoordinates(
- row=cursor_position.row - 1, col=cursor_position.col
- ),
- )
-
- def move_cursor_down(self) -> None:
- """Move the cursor down a single cell"""
- cursor_position = self.cursor_position
- SetConsoleCursorPosition(
- self._handle,
- coords=WindowsCoordinates(
- row=cursor_position.row + 1,
- col=cursor_position.col,
- ),
- )
-
- def move_cursor_forward(self) -> None:
- """Move the cursor forward a single cell. Wrap to the next line if required."""
- row, col = self.cursor_position
- if col == self.screen_size.col - 1:
- row += 1
- col = 0
- else:
- col += 1
- SetConsoleCursorPosition(
- self._handle, coords=WindowsCoordinates(row=row, col=col)
- )
-
- def move_cursor_to_column(self, column: int) -> None:
- """Move cursor to the column specified by the zero-based column index, staying on the same row
-
- Args:
- column (int): The zero-based column index to move the cursor to.
- """
- row, _ = self.cursor_position
- SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
-
- def move_cursor_backward(self) -> None:
- """Move the cursor backward a single cell. Wrap to the previous line if required."""
- row, col = self.cursor_position
- if col == 0:
- row -= 1
- col = self.screen_size.col - 1
- else:
- col -= 1
- SetConsoleCursorPosition(
- self._handle, coords=WindowsCoordinates(row=row, col=col)
- )
-
- def hide_cursor(self) -> None:
- """Hide the cursor"""
- current_cursor_size = self._get_cursor_size()
- invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
- SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
-
- def show_cursor(self) -> None:
- """Show the cursor"""
- current_cursor_size = self._get_cursor_size()
- visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
- SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
-
- def set_title(self, title: str) -> None:
- """Set the title of the terminal window
-
- Args:
- title (str): The new title of the console window
- """
- assert len(title) < 255, "Console title must be less than 255 characters"
- SetConsoleTitle(title)
-
- def _get_cursor_size(self) -> int:
- """Get the percentage of the character cell that is filled by the cursor"""
- cursor_info = CONSOLE_CURSOR_INFO()
- GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
- return int(cursor_info.dwSize)
-
-
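
The `write_styled` method above packs a legacy console attribute into a single byte: the low nibble holds the Windows foreground index, the high nibble the background, and bit 3 of the ANSI number selects the bright variant before the table lookup. A small standalone sketch of that arithmetic, illustrative only and reusing the mapping values shown above:

```python
# Worked example of the attribute word composed by write_styled (not part of the module).
ANSI_TO_WINDOWS = [0, 4, 2, 6, 1, 5, 3, 7, 8, 12, 10, 14, 9, 13, 11, 15]
BRIGHT_BIT = 8

ansi_fore, ansi_back = 1, 4                       # ANSI red text on an ANSI blue background
fore = ANSI_TO_WINDOWS[ansi_fore | BRIGHT_BIT]    # bold red -> bright red -> Windows 12
back = ANSI_TO_WINDOWS[ansi_back]                 # blue -> Windows 1
attributes = fore | (back << 4)                   # 12 | (1 << 4) == 28

print(hex(attributes))  # 0x1c
```
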
-if __name__ == "__main__":
- handle = GetStdHandle()
-
- from pip._vendor.rich.console import Console
-
- console = Console()
-
- term = LegacyWindowsTerm(sys.stdout)
- term.set_title("Win32 Console Examples")
-
- style = Style(color="black", bgcolor="red")
-
- heading = Style.parse("black on green")
-
- # Check colour output
- console.rule("Checking colour output")
- console.print("[on red]on red!")
- console.print("[blue]blue!")
- console.print("[yellow]yellow!")
- console.print("[bold yellow]bold yellow!")
- console.print("[bright_yellow]bright_yellow!")
- console.print("[dim bright_yellow]dim bright_yellow!")
- console.print("[italic cyan]italic cyan!")
- console.print("[bold white on blue]bold white on blue!")
- console.print("[reverse bold white on blue]reverse bold white on blue!")
- console.print("[bold black on cyan]bold black on cyan!")
- console.print("[black on green]black on green!")
- console.print("[blue on green]blue on green!")
- console.print("[white on black]white on black!")
- console.print("[black on white]black on white!")
- console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
-
- # Check cursor movement
- console.rule("Checking cursor movement")
- console.print()
- term.move_cursor_backward()
- term.move_cursor_backward()
- term.write_text("went back and wrapped to prev line")
- time.sleep(1)
- term.move_cursor_up()
- term.write_text("we go up")
- time.sleep(1)
- term.move_cursor_down()
- term.write_text("and down")
- time.sleep(1)
- term.move_cursor_up()
- term.move_cursor_backward()
- term.move_cursor_backward()
- term.write_text("we went up and back 2")
- time.sleep(1)
- term.move_cursor_down()
- term.move_cursor_backward()
- term.move_cursor_backward()
- term.write_text("we went down and back 2")
- time.sleep(1)
-
- # Check erasing of lines
- term.hide_cursor()
- console.print()
- console.rule("Checking line erasing")
- console.print("\n...Deleting to the start of the line...")
- term.write_text("The red arrow shows the cursor location, and direction of erase")
- time.sleep(1)
- term.move_cursor_to_column(16)
- term.write_styled("<", Style.parse("black on red"))
- term.move_cursor_backward()
- time.sleep(1)
- term.erase_start_of_line()
- time.sleep(1)
-
- console.print("\n\n...And to the end of the line...")
- term.write_text("The red arrow shows the cursor location, and direction of erase")
- time.sleep(1)
-
- term.move_cursor_to_column(16)
- term.write_styled(">", Style.parse("black on red"))
- time.sleep(1)
- term.erase_end_of_line()
- time.sleep(1)
-
- console.print("\n\n...Now the whole line will be erased...")
- term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
- time.sleep(1)
- term.erase_line()
-
- term.show_cursor()
- print("\n")
diff --git a/spaces/Benson/text-generation/Examples/Descargar 60 Lakh Cancin.md b/spaces/Benson/text-generation/Examples/Descargar 60 Lakh Cancin.md
deleted file mode 100644
index a271b52a98f1cf20a151a1a77ec60b42abe186a7..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar 60 Lakh Cancin.md
+++ /dev/null
@@ -1,135 +0,0 @@
Bhop Script Download Link: How to Get and Use a Bhop Script for CS:GO

If you are a fan of Counter-Strike: Global Offensive (CS:GO), you may have heard of bhopping, or bunny hopping. This is a technique that lets you move faster and more unpredictably by jumping repeatedly while you are in the air. Bhopping can give you an edge over your opponents, especially in competitive matches where every second counts.

However, bhopping is not easy to master. It requires precise timing, coordination, and practice. That is why some players use a bhop script, a program that automates the jumping for you. A bhop script can make bhopping easier and more consistent, but it also comes with risks and drawbacks.

In this article, we will explain what a bhop script is, how to download and install one, how to use it effectively, and what some alternatives to it are. By the end, you will have a better understanding of bhopping and how to do it like a pro.

What Is a Bhop Script and Why Would You Need One?

Bhop script definition

A bhop script is a piece of code that runs in the background while you play CS:GO. It detects when you are on the ground and when you are in the air, and sends the appropriate commands to make you jump automatically. That way you do not have to press the jump button manually every time you land, which can be difficult and inconsistent.

A bhop script can be written in different languages, such as AutoHotkey, Python, or C++. It can run as a separate program or as part of cheat software. Some bhop scripts are more advanced than others, offering features such as speed control, strafe assistance, or custom key bindings.

Bhop script advantages and disadvantages

Using a bhop script can have some benefits, such as:

- It can make bhopping easier and more consistent, letting you move faster and more fluidly.
- It can help you improve your movement skills and learn to bhop better.

However, using a bhop script also has some drawbacks, such as:

- It can be detected by Valve Anti-Cheat (VAC) or Overwatch, which can result in a ban from playing CS:GO online.
- It can be regarded as cheating by other players and the community, which can damage your reputation and trustworthiness.
- It can take away some of the fun and challenge of bhopping, since you are not doing it yourself.

So before using a bhop script, weigh the pros and cons carefully and decide whether it is worth it. You should also be aware of the possible consequences of using one and take precautions to avoid being banned or reported.
How to Download and Install a Bhop Script for CS:GO

Download link for a bhop script

If you have decided to use a bhop script, you will need to find one that works for CS:GO. There are many places online where you can download bhop scripts, but not all of them are safe or reliable. Some may contain viruses, malware, or outdated code that can damage your computer or your game.

One of the most popular and reliable sources for bhop scripts is GitHub, a platform where developers can share and collaborate on projects. You can find many scripts for CS:GO on GitHub, such as this one or this one. These scripts are written in AutoHotkey, a scripting language that lets you create macros and automate tasks on Windows.

To download a bhop script from GitHub, follow these steps:

- Click the link of the bhop script you want to download.
- Click the green "Code" button and select "Download ZIP".
- Save the ZIP file to your computer and extract it to a folder of your choice.

Installation and usage instructions

To install and use a bhop script, you will need AutoHotkey installed on your computer. AutoHotkey is free, open-source software that lets you run scripts and macros. You can download AutoHotkey from its official website and follow the installation instructions.

Once AutoHotkey is installed, you can run the bhop script by double-clicking the .ahk file you downloaded from GitHub. This starts the script in the background and shows a green icon in the system tray. You can right-click this icon to access the script's options, such as pausing, reloading, or exiting the script.

To use the bhop script in CS:GO, you will need to bind a key to toggle it on and off. The default key for most bhop scripts is F1, but you can change it to any key you prefer. To bind a key, edit the .ahk file with a text editor such as Notepad and find the line that says "F1::". Replace F1 with the key you want to use, such as F2, Space, or Mouse4. Save the file and reload the script.

Now, in CS:GO, you can press the key you bound to toggle the bhop script. When the script is active, it will automatically make you jump whenever you are on the ground. You will still need to use your mouse and keyboard to control your direction and speed while bhopping. To stop bhopping, simply release the key or press it again.
How to Bhop Like a Pro with a Bhop Script

Tips and tricks for bhopping

Using a bhop script can make bhopping easier, but it does not guarantee success. You still need some skill and practice to bhop effectively and efficiently. Here are some tips and tricks that can help you improve your bhopping performance:

- Adjust your mouse sensitivity and acceleration settings to suit your preference and style. Lower sensitivity can help you aim better and control your movement more precisely, while higher sensitivity can help you turn and react faster.
- Use your mouse to strafe left and right while bhopping. Strafing means moving sideways without changing your view direction. To strafe, hold the A or D key while moving the mouse in the same direction. This creates a curve in your movement path and increases your speed and momentum.
- Use your keyboard to crouch while bhopping. Crouching lowers your stance when you press the Ctrl key. It shrinks your hitbox and makes you harder for enemies to hit, and it also helps you land more softly and keep your speed.
- Use the mouse wheel to jump instead of the spacebar. The mouse wheel is more responsive and precise than the spacebar, since it can register multiple inputs per scroll. To jump with the mouse wheel, bind it in the CS:GO settings: go to Options > Keyboard/Mouse > Jump > Mouse Wheel Up/Down.

Bhop script alternatives

If you are not comfortable using a bhop script or want to try something different, there are some alternatives you can use to bhop in CS:GO. These include:

- Bhop servers: dedicated servers that let players bhop freely without restrictions or penalties. They usually have custom maps, plugins, and settings that improve the bhopping experience. You can find bhop servers by browsing the community server browser and filtering by the "bhop" tag. Join any bhop server you like and practice bhopping with other players. Some examples of bhop servers are [BunnyHop Paradise], [House of Climb], and [KZG Bhop].
- Bhop commands: console commands you can use to change game settings and enable bhopping. You can open the console by pressing the tilde (~) key; you first need to enable the developer console in the CS:GO settings. Some of the bhop commands you can use are listed below:
| Command | Description |
| --- | --- |
| sv_cheats 1 | Enables cheats on the server. |
| sv_enablebunnyhopping 1 | Allows unlimited speed while bhopping. |
| sv_autobunnyhopping 1 | Makes you jump automatically while bhopping. |
| sv_staminamax 0 | Removes the stamina limit while bhopping. |
| sv_staminajumpcost 0 | Removes the stamina cost for jumping. |
| sv_staminalandcost 0 | Removes the stamina cost for landing. |
| sv_airaccelerate 12 | Sets the air acceleration value. Higher values make strafing easier and faster. |
| sv_gravity 800 | Sets the gravity value. Lower values make you jump higher and farther. |
| mp_restartgame 1 | Restarts the game to apply the changes. |

Note that these commands only work on offline servers or on online servers that allow cheats. They can also affect other aspects of the game, such as recoil, accuracy, and damage. Use them at your own risk and discretion.
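For offline practice, these cvars are often entered in the console (or kept in a personal practice .cfg) in one go; a possible sequence, built only from the commands in the table above:

```
sv_cheats 1
sv_enablebunnyhopping 1
sv_autobunnyhopping 1
sv_staminamax 0
sv_staminajumpcost 0
sv_staminalandcost 0
sv_airaccelerate 12
mp_restartgame 1
```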
Conclusion

Summary of the main points

Bhopping is a technique that lets you move faster and more unpredictably by jumping repeatedly while in the air. It can give you an edge over your enemies, but it also takes skill and practice to master.

If you want to use a bhop script, you will need to download one from a reliable source such as GitHub and install it on your computer using AutoHotkey. You will also need to bind a key to toggle it on and off in CS:GO. When using a bhop script, follow a few tips and tricks to improve your bhopping performance, such as strafing, crouching, and jumping with the mouse wheel.

If you want to try alternatives to a bhop script, you can join bhop servers, play bhop maps, or use bhop commands. These options can help you practice bhopping without a script, but they can also have their own limitations or risks.

Frequently Asked Questions

Here are some frequently asked questions about bhop scripts:
Is a bhop script legal?

A bhop script is not illegal in the sense that it does not break any law or regulation. However, it is against the rules of CS:GO and can result in a ban or a report from Valve or other players. Using a bhop script is therefore at your own risk and responsibility.

Is a bhop script detectable?

Yes. Bhop scripts are detectable by Valve Anti-Cheat (VAC) and Overwatch, the systems that monitor and prevent cheating in CS:GO. VAC can detect bhop scripts that run as separate programs or as part of cheat software and ban users permanently. Overwatch can flag bhop scripts that are obvious or suspicious and refer users to a jury of other players, who can vote to ban them temporarily or permanently.

Using a bhop script is therefore not safe, and you should be careful and discreet if you decide to use one.

Is a bhop script worth it?

Using a bhop script is ultimately a personal choice that depends on your preferences and goals. Weigh the pros and cons carefully and decide whether it is worth it for you.

How do you bhop without a script?

You can bhop without a script by using your mouse and keyboard to control your jumps and strafing. You will have to press the jump button manually every time you land, which requires precise timing and coordination. You will also have to use the mouse to strafe left and right while in the air, which takes practice and skill. You can use the mouse wheel to jump instead of the spacebar, which can make it easier and more precise.

You can also join bhop servers, play bhop maps, or use bhop commands to practice bhopping without a script. These options can help you learn to bhop better and faster, but they can also have some limitations or risks.

How can you improve at bhopping?

You can improve at bhopping by practicing regularly and following a few tips and tricks, such as:

- Practice bhopping on offline servers or custom maps before trying it in online matches.
- Adjust your mouse sensitivity and acceleration settings to suit your preference and style.
- Use your mouse to strafe left and right while bhopping.
- Use your keyboard to crouch while bhopping.
- Use the mouse wheel to jump instead of the spacebar.

You can also watch videos or streams of professional or experienced players who are good at bhopping, such as [ZooL], [Frankieonpc], or [Shroud]. You can learn from their techniques, strategies, and mistakes, and apply them to your own bhopping.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Android Euro Camin Simulador 2.md b/spaces/Benson/text-generation/Examples/Descargar Android Euro Camin Simulador 2.md
deleted file mode 100644
index 2606abee95f3ba33657d892f3205c8ec3e899d15..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Android Euro Camin Simulador 2.md
+++ /dev/null
@@ -1,67 +0,0 @@
Download Euro Truck Simulator 2 for Android: A Guide for Truck Lovers

If you are a fan of driving simulation games, you may have heard of Euro Truck Simulator 2, one of the most popular and realistic truck driving simulators on the market. But did you know you can also play it on your Android device? In this article, we will show you what Euro Truck Simulator 2 is, what features it offers, how to download it for Android, what system requirements it needs, and what reviews and ratings it has received from critics and players.

What Is Euro Truck Simulator 2?

Euro Truck Simulator 2 is a game developed by SCS Software, a Czech studio that specializes in vehicle simulation games. The game was released in 2012 for Windows, Linux, and Mac OS X, and later ported to Android devices. It lets you travel across Europe as a trucker, delivering various cargoes between different cities and countries. You can choose from different truck models, customize them, run your own business, hire drivers, and explore the vast, detailed map of Europe.

Euro Truck Simulator 2 offers many features that make it a realistic and enjoyable driving experience. Here are some of them:

Licensed trucks from famous brands

The game features 7 licensed truck brands and a total of 15 unique truck models to drive. You can choose from MAN, Scania, Iveco, Renault, DAF, and others. Each truck has its own characteristics, performance, and sound effects.

Realistic road networks and landmarks

The game covers more than 60 European cities and countries, with realistic road networks connecting them. You can drive on highways, country roads, city streets, and more. You can also see famous monuments and landmarks along the way, such as the Eiffel Tower, Big Ben, the Colosseum, and others.

Personal career and company management

Truck customization and modding

The game offers countless customization options for your truck. You can change the chassis, cab, engine, transmission, paint job, accessories, and more. You can also use mods to add new content to the game, such as new trucks, trailers, maps, traffic, weather, and more. The modding community is very active and creates amazing modifications for the game.
How to Download Euro Truck Simulator 2 for Android

If you want to play Euro Truck Simulator 2 on your Android device, you have several download options. Here are some of them:

Download from the Google Play Store

The easiest way to download Euro Truck Simulator 2 for Android is to use the Google Play Store app on your device. You can search for the game by name or use this link to go directly to its page. The game costs $5.99 and requires Android 5.0 or higher. It has more than 10 million downloads and a rating of 4.3 out of 5 stars.

Download from Steam

Another way to download Euro Truck Simulator 2 for Android is to use the Steam app on your device. You can download the Steam app from the Google Play Store or use this link to go directly to its page. The Steam app lets you access your Steam library and play games that are compatible with Android devices. You can also buy games in the Steam store and download them to your device. Euro Truck Simulator 2 costs $19.99 on Steam and requires Android 5.0 or higher. The game has more than 300,000 reviews and a 10/10 rating.

Download from the official website
System Requirements for Euro Truck Simulator 2

Before downloading Euro Truck Simulator 2 for Android, you should check whether your device meets the minimum or recommended system requirements for the game. Here are the system requirements for Euro Truck Simulator 2:

Minimum requirements

- OS: Android 5.0 or higher
- CPU: Dual core 1.8 GHz
- RAM: 2 GB
- GPU: Mali-T720 or equivalent
- Storage: 3 GB

Recommended requirements

- OS: Android 7.0 or higher
- CPU: Quad core 2.5 GHz
- RAM: 4 GB
- GPU: Adreno 530 or equivalent
- Storage: 5 GB
Reviews and Ratings of Euro Truck Simulator 2

Euro Truck Simulator 2 is one of the most acclaimed and popular driving simulation games ever made. It has received many positive reviews and ratings from critics and players alike. Here are some of them:

PC Gamer review

PC Gamer gave Euro Truck Simulator 2 a score of 91/100, praising its realism, variety, and modding support. The reviewer wrote: "Euro Truck Simulator 2 is not a game for thrill-seekers, but rather an open-ended simulator that puts you in the driver's seat of a massive truck, letting you travel across Europe at your own pace and with your own goals."

Steam reviews

Steam users gave Euro Truck Simulator 2 an "Overwhelmingly Positive" rating, with more than 97% of reviews being positive. The most helpful review reads: "This game is amazing. It is relaxing, immersive, and addictive. You can drive all over Europe, deliver cargo, customize your truck, manage your own business, and more. The graphics are beautiful, the sound is realistic, and the gameplay is smooth. The game also has a great modding community that adds new content and features. If you like driving games, you should definitely try this one."

Metacritic review

Conclusion

Euro Truck Simulator 2 is a game that lets you experience the life of a trucker in Europe. You can drive various trucks in different countries, deliver cargo, customize your truck, run your own business, and more. The game offers realistic graphics, sound effects, physics, and road networks, as well as a large modding community that adds new content and features. You can download Euro Truck Simulator 2 for Android from different sources, such as the Google Play Store, Steam, or the game's official website. However, you should check whether your device meets the game's system requirements before downloading it.
Frequently Asked Questions (FAQ)

Here are some of the most common questions people ask about Euro Truck Simulator 2 for Android:

Q: Can I play Euro Truck Simulator 2 online with other players?

A: Euro Truck Simulator 2 does not have an official multiplayer mode, but there are unofficial mods that let you play online with other players. One of the most popular is TruckersMP, which you can download from this link. Keep in mind, however, that these mods are not supported by the developers and can cause bugs, crashes, or compatibility problems.

Q: Can I use a controller or a steering wheel to play Euro Truck Simulator 2 on Android?

A: Yes, you can use a controller or a steering wheel to play Euro Truck Simulator 2 on Android, as long as they are compatible with your device and the game. You can connect them via Bluetooth, USB, or an OTG cable. You can also customize the controls and sensitivity in the game settings.

Q: How do I update Euro Truck Simulator 2 on Android?

Q: How can I earn more money and experience in Euro Truck Simulator 2?

A: There are several ways to earn more money and experience in Euro Truck Simulator 2. You can complete more deliveries, take on more difficult cargo, drive longer distances, follow traffic rules, avoid damage and fines, and use your skills wisely. You can also use cheats or mods to get unlimited money and experience, but that can ruin the fun and challenge of the game.

Q: How can I contact the developers of Euro Truck Simulator 2?

A: If you have any questions, comments, suggestions, or problems regarding Euro Truck Simulator 2, you can contact the game's developers using this link. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, YouTube, and Twitch. The developers are very responsive and helpful to their fans and customers.
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/crt/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/crt/__init__.py
deleted file mode 100644
index 952ebf34cc37bde64e7fcd14a9b252a205429f47..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/crt/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-
-# A list of auth types supported by the signers in botocore/crt/auth.py. This
-# should always match the keys of botocore.crt.auth.CRT_AUTH_TYPE_MAPS. The
-# information is duplicated here so that it can be accessed in environments
-# where `awscrt` is not present and any import from botocore.crt.auth would
-# fail.
-CRT_SUPPORTED_AUTH_TYPES = (
- 'v4',
- 'v4-query',
- 'v4a',
- 's3v4',
- 's3v4-query',
- 's3v4a',
- 's3v4a-query',
-)
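
A minimal sketch of how calling code might consult this tuple to decide whether a CRT-based signer is available for a given auth type; the helper function here is hypothetical, not part of botocore:

```python
from botocore.crt import CRT_SUPPORTED_AUTH_TYPES


def can_use_crt_signer(auth_type: str) -> bool:
    # True only for auth types handled by the CRT signers in botocore/crt/auth.py.
    # Works even when `awscrt` itself is not installed, which is the point of
    # duplicating the list in this module.
    return auth_type in CRT_SUPPORTED_AUTH_TYPES


print(can_use_crt_signer("v4a"))  # True
print(can_use_crt_signer("v2"))   # False
```
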
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
deleted file mode 100644
index 43f6e144f677a113b5362dcbdfb75db4f41c2b2f..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-Script which takes one or more file paths and reports on their detected
-encodings
-
-Example::
-
- % chardetect somefile someotherfile
- somefile: windows-1252 with confidence 0.5
- someotherfile: ascii with confidence 1.0
-
-If no paths are provided, it takes its input from stdin.
-
-"""
-
-
-import argparse
-import sys
-from typing import Iterable, List, Optional
-
-from .. import __version__
-from ..universaldetector import UniversalDetector
-
-
-def description_of(
- lines: Iterable[bytes],
- name: str = "stdin",
- minimal: bool = False,
- should_rename_legacy: bool = False,
-) -> Optional[str]:
- """
- Return a string describing the probable encoding of a file or
- list of strings.
-
- :param lines: The lines to get the encoding of.
- :type lines: Iterable of bytes
- :param name: Name of file or collection of lines
-    :type name: str
-    :param minimal: If True, return only the detected encoding name instead of
-                    the full description string.
-    :type minimal: ``bool``
- :param should_rename_legacy: Should we rename legacy encodings to
- their more modern equivalents?
- :type should_rename_legacy: ``bool``
- """
- u = UniversalDetector(should_rename_legacy=should_rename_legacy)
- for line in lines:
- line = bytearray(line)
- u.feed(line)
- # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
- if u.done:
- break
- u.close()
- result = u.result
- if minimal:
- return result["encoding"]
- if result["encoding"]:
- return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
- return f"{name}: no result"
-
-
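
A small usage sketch of `description_of` outside the CLI, assuming the standalone `chardet` package (the vendored copy under `pip._vendor` behaves the same) and a hypothetical file path:

```python
from chardet.cli.chardetect import description_of

# "somefile.txt" is a placeholder; any file opened in binary mode works.
with open("somefile.txt", "rb") as handle:
    print(description_of(handle, name="somefile.txt"))  # e.g. "somefile.txt: ascii with confidence 1.0"

with open("somefile.txt", "rb") as handle:
    print(description_of(handle, minimal=True))          # just the encoding name, e.g. "ascii"
```
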
-def main(argv: Optional[List[str]] = None) -> None:
- """
- Handles command line arguments and gets things started.
-
- :param argv: List of arguments, as if specified on the command-line.
- If None, ``sys.argv[1:]`` is used instead.
- :type argv: list of str
- """
- # Get command line arguments
- parser = argparse.ArgumentParser(
- description=(
- "Takes one or more file paths and reports their detected encodings"
- )
- )
- parser.add_argument(
- "input",
- help="File whose encoding we would like to determine. (default: stdin)",
- type=argparse.FileType("rb"),
- nargs="*",
- default=[sys.stdin.buffer],
- )
- parser.add_argument(
- "--minimal",
- help="Print only the encoding to standard output",
- action="store_true",
- )
- parser.add_argument(
- "-l",
- "--legacy",
- help="Rename legacy encodings to more modern ones.",
- action="store_true",
- )
- parser.add_argument(
- "--version", action="version", version=f"%(prog)s {__version__}"
- )
- args = parser.parse_args(argv)
-
- for f in args.input:
- if f.isatty():
- print(
- "You are running chardetect interactively. Press "
- "CTRL-D twice at the start of a blank line to signal the "
- "end of your input. If you want help, run chardetect "
- "--help\n",
- file=sys.stderr,
- )
- print(
- description_of(
- f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
- )
- )
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/appdirs.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/appdirs.py
deleted file mode 100644
index ae67001af8b661373edeee2eb327b9f63e630d62..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/appdirs.py
+++ /dev/null
@@ -1,608 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See the appdirs project page for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- unicode = str
-
-if sys.platform.startswith('java'):
- import platform
- os_name = platform.java_ver()[3][0]
- if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
- system = 'win32'
- elif os_name.startswith('Mac'): # "Mac OS X", etc.
- system = 'darwin'
- else: # "Linux", "SunOS", "FreeBSD", etc.
- # Setting this to "linux2" is not ideal, but only Windows or Mac
- # are actually checked for and the rest of the module expects
- # *sys.platform* style strings.
- system = 'linux2'
-else:
- system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
-
- for a discussion of issues.
-
-    Typical user data directories are:
-        Mac OS X:               ~/Library/Application Support/<AppName>
-        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-        Win XP (not roaming):   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-        Win XP (roaming):       C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
- For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
- That means, by default "~/.local/share/".
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
- path = os.path.normpath(_get_win_folder(const))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Application Support/')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
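
A short usage sketch, assuming the standalone `appdirs` distribution and placeholder app names; the commented paths reflect the typical per-platform results described in the docstring:

```python
from appdirs import user_data_dir

# "SuperApp" and "Acme" are illustrative placeholder names.
path = user_data_dir("SuperApp", "Acme", version="1.0")
# Windows 7+ (not roaming): C:\Users\<username>\AppData\Local\Acme\SuperApp\1.0
# macOS:                    ~/Library/Application Support/SuperApp/1.0
# Linux:                    ~/.local/share/SuperApp/1.0
print(path)
```
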
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
- r"""Return full path to the user-shared data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of data dirs should be
- returned. By default, the first item from XDG_DATA_DIRS is
- returned, or '/usr/local/share/',
- if XDG_DATA_DIRS is not set
-
- Typical site data directories are:
- Mac OS X: /Library/Application Support/
- Unix: /usr/local/share/ or /usr/share/
- Win XP: C:\Documents and Settings\All Users\Application Data\\
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
- Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7.
-
- For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('/Library/Application Support')
- if appname:
- path = os.path.join(path, appname)
- else:
- # XDG default for $XDG_DATA_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_DATA_DIRS',
- os.pathsep.join(['/usr/local/share', '/usr/share']))
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific config dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
-
- for a discussion of issues.
-
- Typical user config directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
- That means, by default "~/.config/".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared config dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of config dirs should be
- returned. By default, the first item from XDG_CONFIG_DIRS is
- returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set
-
- Typical site config directories are:
- Mac OS X: same as site_data_dir
- Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in
- $XDG_CONFIG_DIRS
- Win *: same as site_data_dir
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
- For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system in ["win32", "darwin"]:
- path = site_data_dir(appname, appauthor)
- if appname and version:
- path = os.path.join(path, version)
- else:
- # XDG default for $XDG_CONFIG_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific cache dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Cache" to the base app data dir for Windows. See
- discussion below.
-
- Typical user cache directories are:
- Mac OS X: ~/Library/Caches/
- Unix: ~/.cache/ (XDG default)
- Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache
- Vista: C:\Users\\AppData\Local\\\Cache
-
- On Windows the only suggestion in the MSDN docs is that local settings go in
- the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
- app data dir (the default returned by `user_data_dir` above). Apps typically
- put cache data somewhere *under* the given dir here. Some examples:
- ...\Mozilla\Firefox\Profiles\\Cache
- ...\Acme\SuperApp\Cache\1.0
- OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
- This can be disabled with the `opinion=False` option.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- if opinion:
- path = os.path.join(path, "Cache")
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Caches')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific state dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
-
- for a discussion of issues.
-
- Typical user state directories are:
- Mac OS X: same as user_data_dir
- Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow this Debian proposal
- to extend the XDG spec and support $XDG_STATE_HOME.
-
- That means, by default "~/.local/state/".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific log dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be ".".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Logs" to the base app data dir for Windows, and "log" to the
- base cache dir for Unix. See discussion below.
-
- Typical user log directories are:
- Mac OS X: ~/Library/Logs/
- Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined
- Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs
- Vista: C:\Users\\AppData\Local\\\Logs
-
- On Windows the only suggestion in the MSDN docs is that local settings
- go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
- examples of what some windows apps use for a logs dir.)
-
- OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
- value for Windows and appends "log" to the user cache dir for Unix.
- This can be disabled with the `opinion=False` option.
- """
- if system == "darwin":
- path = os.path.join(
- os.path.expanduser('~/Library/Logs'),
- appname)
- elif system == "win32":
- path = user_data_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "Logs")
- else:
- path = user_cache_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "log")
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-class AppDirs(object):
- """Convenience wrapper for getting application dirs."""
- def __init__(self, appname=None, appauthor=None, version=None,
- roaming=False, multipath=False):
- self.appname = appname
- self.appauthor = appauthor
- self.version = version
- self.roaming = roaming
- self.multipath = multipath
-
- @property
- def user_data_dir(self):
- return user_data_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_data_dir(self):
- return site_data_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_config_dir(self):
- return user_config_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_config_dir(self):
- return site_config_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_cache_dir(self):
- return user_cache_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_state_dir(self):
- return user_state_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_log_dir(self):
- return user_log_dir(self.appname, self.appauthor,
- version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
- """This is a fallback technique at best. I'm not sure if using the
- registry for this guarantees us the correct answer for all CSIDL_*
- names.
- """
- if PY3:
- import winreg as _winreg
- else:
- import _winreg
-
- shell_folder_name = {
- "CSIDL_APPDATA": "AppData",
- "CSIDL_COMMON_APPDATA": "Common AppData",
- "CSIDL_LOCAL_APPDATA": "Local AppData",
- }[csidl_name]
-
- key = _winreg.OpenKey(
- _winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
- )
- dir, type = _winreg.QueryValueEx(key, shell_folder_name)
- return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
- from win32com.shell import shellcon, shell
- dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
- # Try to make this a unicode path because SHGetFolderPath does
- # not return unicode strings when there is unicode data in the
- # path.
- try:
- dir = unicode(dir)
-
- # Downgrade to short path name if have highbit chars. See
- # .
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- try:
- import win32api
- dir = win32api.GetShortPathName(dir)
- except ImportError:
- pass
- except UnicodeError:
- pass
- return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
- import ctypes
-
- csidl_const = {
- "CSIDL_APPDATA": 26,
- "CSIDL_COMMON_APPDATA": 35,
- "CSIDL_LOCAL_APPDATA": 28,
- }[csidl_name]
-
- buf = ctypes.create_unicode_buffer(1024)
- ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
- # Downgrade to short path name if have highbit chars. See
- # .
- has_high_char = False
- for c in buf:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf2 = ctypes.create_unicode_buffer(1024)
- if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
- buf = buf2
-
- return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
- import array
- from com.sun import jna
- from com.sun.jna.platform import win32
-
- buf_size = win32.WinDef.MAX_PATH * 2
- buf = array.zeros('c', buf_size)
- shell = win32.Shell32.INSTANCE
- shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- # Downgrade to short path name if have highbit chars. See
- # .
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf = array.zeros('c', buf_size)
- kernel = win32.Kernel32.INSTANCE
- if kernel.GetShortPathName(dir, buf, buf_size):
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- return dir
-
-if system == "win32":
- try:
- import win32com.shell
- _get_win_folder = _get_win_folder_with_pywin32
- except ImportError:
- try:
- from ctypes import windll
- _get_win_folder = _get_win_folder_with_ctypes
- except ImportError:
- try:
- import com.sun.jna
- _get_win_folder = _get_win_folder_with_jna
- except ImportError:
- _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
- appname = "MyApp"
- appauthor = "MyCompany"
-
- props = ("user_data_dir",
- "user_config_dir",
- "user_cache_dir",
- "user_state_dir",
- "user_log_dir",
- "site_data_dir",
- "site_config_dir")
-
- print("-- app dirs %s --" % __version__)
-
- print("-- app dirs (with optional 'version')")
- dirs = AppDirs(appname, appauthor, version="1.0")
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'version')")
- dirs = AppDirs(appname, appauthor)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'appauthor')")
- dirs = AppDirs(appname)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (with disabled 'appauthor')")
- dirs = AppDirs(appname, appauthor=False)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/util.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/util.h
deleted file mode 100644
index 07ee7d9a1c86217a4108bd291e7eb45a6f297665..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/util.h
+++ /dev/null
@@ -1,589 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the NVIDIA CORPORATION nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace thrust
-{
-
-namespace cuda_cub {
-
-inline __host__ __device__
-cudaStream_t
-default_stream()
-{
-#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
- return cudaStreamPerThread;
-#else
- return cudaStreamLegacy;
-#endif
-}
-
-// Fallback implementation of the customization point.
-template <class Derived>
-__host__ __device__
-cudaStream_t
-get_stream(execution_policy<Derived> &)
-{
- return default_stream();
-}
-
-// Entry point/interface.
-template <class Derived>
-__host__ __device__ cudaStream_t
-stream(execution_policy<Derived> &policy)
-{
- return get_stream(derived_cast(policy));
-}
-
-// Fallback implementation of the customization point.
-__thrust_exec_check_disable__
-template <class Derived>
-__host__ __device__
-cudaError_t
-synchronize_stream(execution_policy<Derived> &policy)
-{
- cudaError_t result;
- if (THRUST_IS_HOST_CODE) {
- #if THRUST_INCLUDE_HOST_CODE
- cudaStreamSynchronize(stream(policy));
- result = cudaGetLastError();
- #endif
- } else {
- #if THRUST_INCLUDE_DEVICE_CODE
- #if __THRUST_HAS_CUDART__
- THRUST_UNUSED_VAR(policy);
- cudaDeviceSynchronize();
- result = cudaGetLastError();
- #else
- THRUST_UNUSED_VAR(policy);
- result = cudaSuccess;
- #endif
- #endif
- }
- return result;
-}
-
-// Entry point/interface.
-template <class Policy>
-__host__ __device__
-cudaError_t
-synchronize(Policy &policy)
-{
- return synchronize_stream(derived_cast(policy));
-}
-
-template <class Type>
-THRUST_HOST_FUNCTION cudaError_t
-trivial_copy_from_device(Type * dst,
- Type const * src,
- size_t count,
- cudaStream_t stream)
-{
- cudaError status = cudaSuccess;
- if (count == 0) return status;
-
- status = ::cudaMemcpyAsync(dst,
- src,
- sizeof(Type) * count,
- cudaMemcpyDeviceToHost,
- stream);
- cudaStreamSynchronize(stream);
- return status;
-}
-
-template <class Type>
-THRUST_HOST_FUNCTION cudaError_t
-trivial_copy_to_device(Type * dst,
- Type const * src,
- size_t count,
- cudaStream_t stream)
-{
- cudaError status = cudaSuccess;
- if (count == 0) return status;
-
- status = ::cudaMemcpyAsync(dst,
- src,
- sizeof(Type) * count,
- cudaMemcpyHostToDevice,
- stream);
- cudaStreamSynchronize(stream);
- return status;
-}
-
-template <class Policy, class Type>
-__host__ __device__ cudaError_t
-trivial_copy_device_to_device(Policy & policy,
- Type * dst,
- Type const *src,
- size_t count)
-{
- cudaError_t status = cudaSuccess;
- if (count == 0) return status;
-
- cudaStream_t stream = cuda_cub::stream(policy);
- //
- status = ::cudaMemcpyAsync(dst,
- src,
- sizeof(Type) * count,
- cudaMemcpyDeviceToDevice,
- stream);
- cuda_cub::synchronize(policy);
- return status;
-}
-
-inline void __host__ __device__
-terminate()
-{
- if (THRUST_IS_DEVICE_CODE) {
- #if THRUST_INCLUDE_DEVICE_CODE
- asm("trap;");
- #endif
- } else {
- #if THRUST_INCLUDE_HOST_CODE
- std::terminate();
- #endif
- }
-}
-
-__host__ __device__
-inline void throw_on_error(cudaError_t status)
-{
-#if __THRUST_HAS_CUDART__
- // Clear the global CUDA error state which may have been set by the last
- // call. Otherwise, errors may "leak" to unrelated kernel launches.
- cudaGetLastError();
-#endif
-
- if (cudaSuccess != status)
- {
- if (THRUST_IS_HOST_CODE) {
- #if THRUST_INCLUDE_HOST_CODE
- throw thrust::system_error(status, thrust::cuda_category());
- #endif
- } else {
- #if THRUST_INCLUDE_DEVICE_CODE
- #if __THRUST_HAS_CUDART__
- printf("Thrust CUDA backend error: %s: %s\n",
- cudaGetErrorName(status),
- cudaGetErrorString(status));
- #else
- printf("Thrust CUDA backend error: %d\n",
- static_cast<int>(status));
- #endif
- cuda_cub::terminate();
- #endif
- }
- }
-}
-
-__host__ __device__
-inline void throw_on_error(cudaError_t status, char const *msg)
-{
-#if __THRUST_HAS_CUDART__
- // Clear the global CUDA error state which may have been set by the last
- // call. Otherwise, errors may "leak" to unrelated kernel launches.
- cudaGetLastError();
-#endif
-
- if (cudaSuccess != status)
- {
- if (THRUST_IS_HOST_CODE) {
- #if THRUST_INCLUDE_HOST_CODE
- throw thrust::system_error(status, thrust::cuda_category(), msg);
- #endif
- } else {
- #if THRUST_INCLUDE_DEVICE_CODE
- #if __THRUST_HAS_CUDART__
- printf("Thrust CUDA backend error: %s: %s: %s\n",
- cudaGetErrorName(status),
- cudaGetErrorString(status),
- msg);
- #else
- printf("Thrust CUDA backend error: %d: %s \n",
- static_cast<int>(status),
- msg);
- #endif
- cuda_cub::terminate();
- #endif
- }
- }
-}
-
-// FIXME: Move the iterators elsewhere.
-
-template <class ValueType, class InputIt, class UnaryOp>
-struct transform_input_iterator_t
-{
- typedef transform_input_iterator_t<ValueType, InputIt, UnaryOp> self_t;
- typedef typename iterator_traits<InputIt>::difference_type difference_type;
- typedef ValueType value_type;
- typedef void pointer;
- typedef value_type reference;
- typedef std::random_access_iterator_tag iterator_category;
-
- InputIt input;
- mutable UnaryOp op;
-
- __host__ __device__ __forceinline__
- transform_input_iterator_t(InputIt input, UnaryOp op)
- : input(input), op(op) {}
-
-#if THRUST_CPP_DIALECT >= 2011
- transform_input_iterator_t(const self_t &) = default;
-#endif
-
- // UnaryOp might not be copy assignable, such as when it is a lambda. Define
- // an explicit copy assignment operator that doesn't try to assign it.
- self_t& operator=(const self_t& o)
- {
- input = o.input;
- return *this;
- }
-
- /// Postfix increment
- __host__ __device__ __forceinline__ self_t operator++(int)
- {
- self_t retval = *this;
- ++input;
- return retval;
- }
-
- /// Prefix increment
- __host__ __device__ __forceinline__ self_t operator++()
- {
- ++input;
- return *this;
- }
-
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*() const
- {
- typename thrust::iterator_value<InputIt>::type x = *input;
- return op(x);
- }
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*()
- {
- typename thrust::iterator_value<InputIt>::type x = *input;
- return op(x);
- }
-
- /// Addition
- __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
- {
- return self_t(input + n, op);
- }
-
- /// Addition assignment
- __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
- {
- input += n;
- return *this;
- }
-
- /// Subtraction
- __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
- {
- return self_t(input - n, op);
- }
-
- /// Subtraction assignment
- __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
- {
- input -= n;
- return *this;
- }
-
- /// Distance
- __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
- {
- return input - other.input;
- }
-
- /// Array subscript
- __host__ __device__ __forceinline__ reference operator[](difference_type n) const
- {
- return op(input[n]);
- }
-
- /// Equal to
- __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
- {
- return (input == rhs.input);
- }
-
- /// Not equal to
- __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
- {
- return (input != rhs.input);
- }
-}; // struct transform_input_iterator_t
-
-template <class ValueType, class InputIt1, class InputIt2, class BinaryOp>
-struct transform_pair_of_input_iterators_t
-{
- typedef transform_pair_of_input_iterators_t<ValueType, InputIt1, InputIt2, BinaryOp> self_t;
- typedef typename iterator_traits<InputIt1>::difference_type difference_type;
- typedef ValueType value_type;
- typedef void pointer;
- typedef value_type reference;
- typedef std::random_access_iterator_tag iterator_category;
-
- InputIt1 input1;
- InputIt2 input2;
- mutable BinaryOp op;
-
- __host__ __device__ __forceinline__
- transform_pair_of_input_iterators_t(InputIt1 input1_,
- InputIt2 input2_,
- BinaryOp op_)
- : input1(input1_), input2(input2_), op(op_) {}
-
-#if THRUST_CPP_DIALECT >= 2011
- transform_pair_of_input_iterators_t(const self_t &) = default;
-#endif
-
- // BinaryOp might not be copy assignable, such as when it is a lambda.
- // Define an explicit copy assignment operator that doesn't try to assign it.
- self_t& operator=(const self_t& o)
- {
- input1 = o.input1;
- input2 = o.input2;
- return *this;
- }
-
- /// Postfix increment
- __host__ __device__ __forceinline__ self_t operator++(int)
- {
- self_t retval = *this;
- ++input1;
- ++input2;
- return retval;
- }
-
- /// Prefix increment
- __host__ __device__ __forceinline__ self_t operator++()
- {
- ++input1;
- ++input2;
- return *this;
- }
-
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*() const
- {
- return op(*input1, *input2);
- }
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*()
- {
- return op(*input1, *input2);
- }
-
- /// Addition
- __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
- {
- return self_t(input1 + n, input2 + n, op);
- }
-
- /// Addition assignment
- __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
- {
- input1 += n;
- input2 += n;
- return *this;
- }
-
- /// Subtraction
- __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
- {
- return self_t(input1 - n, input2 - n, op);
- }
-
- /// Subtraction assignment
- __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
- {
- input1 -= n;
- input2 -= n;
- return *this;
- }
-
- /// Distance
- __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
- {
- return input1 - other.input1;
- }
-
- /// Array subscript
- __host__ __device__ __forceinline__ reference operator[](difference_type n) const
- {
- return op(input1[n], input2[n]);
- }
-
- /// Equal to
- __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
- {
- return (input1 == rhs.input1) && (input2 == rhs.input2);
- }
-
- /// Not equal to
- __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
- {
- return (input1 != rhs.input1) || (input2 != rhs.input2);
- }
-
-}; // struct transform_pair_of_input_iterators_t
-
-
-struct identity
-{
- template <class T>
- __host__ __device__ T const &
- operator()(T const &t) const
- {
- return t;
- }
-
- template <class T>
- __host__ __device__ T &
- operator()(T &t) const
- {
- return t;
- }
-};
-
-
-template <class T>
-struct counting_iterator_t
-{
- typedef counting_iterator_t<T> self_t;
- typedef T difference_type;
- typedef T value_type;
- typedef void pointer;
- typedef T reference;
- typedef std::random_access_iterator_tag iterator_category;
-
- T count;
-
- __host__ __device__ __forceinline__
- counting_iterator_t(T count_) : count(count_) {}
-
- /// Postfix increment
- __host__ __device__ __forceinline__ self_t operator++(int)
- {
- self_t retval = *this;
- ++count;
- return retval;
- }
-
- /// Prefix increment
- __host__ __device__ __forceinline__ self_t operator++()
- {
- ++count;
- return *this;
- }
-
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*() const
- {
- return count;
- }
-
- /// Indirection
- __host__ __device__ __forceinline__ reference operator*()
- {
- return count;
- }
-
- /// Addition
- __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
- {
- return self_t(count + n);
- }
-
- /// Addition assignment
- __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
- {
- count += n;
- return *this;
- }
-
- /// Subtraction
- __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
- {
- return self_t(count - n);
- }
-
- /// Subtraction assignment
- __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
- {
- count -= n;
- return *this;
- }
-
- /// Distance
- __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
- {
- return count - other.count;
- }
-
- /// Array subscript
- __host__ __device__ __forceinline__ reference operator[](difference_type n) const
- {
- return count + n;
- }
-
- /// Equal to
- __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
- {
- return (count == rhs.count);
- }
-
- /// Not equal to
- __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
- {
- return (count != rhs.count);
- }
-
-}; // struct count_iterator_t
-
-} // namespace cuda_cub
-
-} // end namespace thrust
diff --git a/spaces/CVPR/lama-example/saicinpainting/training/losses/style_loss.py b/spaces/CVPR/lama-example/saicinpainting/training/losses/style_loss.py
deleted file mode 100644
index 0bb42d7fbc5d17a47bec7365889868505f5fdfb5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/saicinpainting/training/losses/style_loss.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import torch
-import torch.nn as nn
-import torchvision.models as models
-
-
-class PerceptualLoss(nn.Module):
- r"""
- Perceptual loss, VGG-based
- https://arxiv.org/abs/1603.08155
- https://github.com/dxyang/StyleTransfer/blob/master/utils.py
- """
-
- def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
- super(PerceptualLoss, self).__init__()
- self.add_module('vgg', VGG19())
- self.criterion = torch.nn.L1Loss()
- self.weights = weights
-
- def __call__(self, x, y):
- # Compute features
- x_vgg, y_vgg = self.vgg(x), self.vgg(y)
-
- content_loss = 0.0
- content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
- content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
- content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
- content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
- content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
-
-
- return content_loss
-
-
-class VGG19(torch.nn.Module):
- def __init__(self):
- super(VGG19, self).__init__()
- features = models.vgg19(pretrained=True).features
- self.relu1_1 = torch.nn.Sequential()
- self.relu1_2 = torch.nn.Sequential()
-
- self.relu2_1 = torch.nn.Sequential()
- self.relu2_2 = torch.nn.Sequential()
-
- self.relu3_1 = torch.nn.Sequential()
- self.relu3_2 = torch.nn.Sequential()
- self.relu3_3 = torch.nn.Sequential()
- self.relu3_4 = torch.nn.Sequential()
-
- self.relu4_1 = torch.nn.Sequential()
- self.relu4_2 = torch.nn.Sequential()
- self.relu4_3 = torch.nn.Sequential()
- self.relu4_4 = torch.nn.Sequential()
-
- self.relu5_1 = torch.nn.Sequential()
- self.relu5_2 = torch.nn.Sequential()
- self.relu5_3 = torch.nn.Sequential()
- self.relu5_4 = torch.nn.Sequential()
-
- for x in range(2):
- self.relu1_1.add_module(str(x), features[x])
-
- for x in range(2, 4):
- self.relu1_2.add_module(str(x), features[x])
-
- for x in range(4, 7):
- self.relu2_1.add_module(str(x), features[x])
-
- for x in range(7, 9):
- self.relu2_2.add_module(str(x), features[x])
-
- for x in range(9, 12):
- self.relu3_1.add_module(str(x), features[x])
-
- for x in range(12, 14):
- self.relu3_2.add_module(str(x), features[x])
-
- for x in range(14, 16):
- self.relu3_3.add_module(str(x), features[x])
-
- for x in range(16, 18):
- self.relu3_4.add_module(str(x), features[x])
-
- for x in range(18, 21):
- self.relu4_1.add_module(str(x), features[x])
-
- for x in range(21, 23):
- self.relu4_2.add_module(str(x), features[x])
-
- for x in range(23, 25):
- self.relu4_3.add_module(str(x), features[x])
-
- for x in range(25, 27):
- self.relu4_4.add_module(str(x), features[x])
-
- for x in range(27, 30):
- self.relu5_1.add_module(str(x), features[x])
-
- for x in range(30, 32):
- self.relu5_2.add_module(str(x), features[x])
-
- for x in range(32, 34):
- self.relu5_3.add_module(str(x), features[x])
-
- for x in range(34, 36):
- self.relu5_4.add_module(str(x), features[x])
-
- # don't need the gradients, just want the features
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, x):
- relu1_1 = self.relu1_1(x)
- relu1_2 = self.relu1_2(relu1_1)
-
- relu2_1 = self.relu2_1(relu1_2)
- relu2_2 = self.relu2_2(relu2_1)
-
- relu3_1 = self.relu3_1(relu2_2)
- relu3_2 = self.relu3_2(relu3_1)
- relu3_3 = self.relu3_3(relu3_2)
- relu3_4 = self.relu3_4(relu3_3)
-
- relu4_1 = self.relu4_1(relu3_4)
- relu4_2 = self.relu4_2(relu4_1)
- relu4_3 = self.relu4_3(relu4_2)
- relu4_4 = self.relu4_4(relu4_3)
-
- relu5_1 = self.relu5_1(relu4_4)
- relu5_2 = self.relu5_2(relu5_1)
- relu5_3 = self.relu5_3(relu5_2)
- relu5_4 = self.relu5_4(relu5_3)
-
- out = {
- 'relu1_1': relu1_1,
- 'relu1_2': relu1_2,
-
- 'relu2_1': relu2_1,
- 'relu2_2': relu2_2,
-
- 'relu3_1': relu3_1,
- 'relu3_2': relu3_2,
- 'relu3_3': relu3_3,
- 'relu3_4': relu3_4,
-
- 'relu4_1': relu4_1,
- 'relu4_2': relu4_2,
- 'relu4_3': relu4_3,
- 'relu4_4': relu4_4,
-
- 'relu5_1': relu5_1,
- 'relu5_2': relu5_2,
- 'relu5_3': relu5_3,
- 'relu5_4': relu5_4,
- }
- return out
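A minimal usage sketch for the perceptual loss above, assuming the module is importable and torchvision can download the pretrained VGG19 weights; the random tensors stand in for real image batches.

```python
import torch

loss_fn = PerceptualLoss()            # VGG19-based loss defined above
pred = torch.rand(2, 3, 256, 256)     # placeholder "generated" images
target = torch.rand(2, 3, 256, 256)   # placeholder "reference" images
loss = loss_fn(pred, target)          # weighted L1 distance over relu*_1 features
print(loss.item())
```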
diff --git a/spaces/CVPR/lama-example/saicinpainting/training/modules/fake_fakes.py b/spaces/CVPR/lama-example/saicinpainting/training/modules/fake_fakes.py
deleted file mode 100644
index 45c4ad559cef2730b771a709197e00ae1c87683c..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/saicinpainting/training/modules/fake_fakes.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import torch
-from kornia import SamplePadding
-from kornia.augmentation import RandomAffine, CenterCrop
-
-
-class FakeFakesGenerator:
- def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2):
- self.grad_aug = RandomAffine(degrees=360,
- translate=0.2,
- padding_mode=SamplePadding.REFLECTION,
- keepdim=False,
- p=1)
- self.img_aug = RandomAffine(degrees=img_aug_degree,
- translate=img_aug_translate,
- padding_mode=SamplePadding.REFLECTION,
- keepdim=True,
- p=1)
- self.aug_proba = aug_proba
-
- def __call__(self, input_images, masks):
- blend_masks = self._fill_masks_with_gradient(masks)
- blend_target = self._make_blend_target(input_images)
- result = input_images * (1 - blend_masks) + blend_target * blend_masks
- return result, blend_masks
-
- def _make_blend_target(self, input_images):
- batch_size = input_images.shape[0]
- permuted = input_images[torch.randperm(batch_size)]
- augmented = self.img_aug(input_images)
- is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float()
- result = augmented * is_aug + permuted * (1 - is_aug)
- return result
-
- def _fill_masks_with_gradient(self, masks):
- batch_size, _, height, width = masks.shape
- grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \
- .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2)
- grad = self.grad_aug(grad)
- grad = CenterCrop((height, width))(grad)
- grad *= masks
-
- grad_for_min = grad + (1 - masks) * 10
- grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None]
- grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6
- grad.clamp_(min=0, max=1)
-
- return grad
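For orientation, a small hedged example of driving `FakeFakesGenerator` with random tensors; real usage feeds image batches and binary inpainting masks, and kornia must be installed for the augmentations.

```python
import torch

gen = FakeFakesGenerator(aug_proba=0.5)
images = torch.rand(4, 3, 64, 64)                  # placeholder image batch
masks = (torch.rand(4, 1, 64, 64) > 0.5).float()   # placeholder binary masks
fake_fakes, blend_masks = gen(images, masks)
print(fake_fakes.shape, blend_masks.shape)         # same spatial size as the inputs
```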
diff --git a/spaces/CVPR/v-doc_abstractive_mac/demo.py b/spaces/CVPR/v-doc_abstractive_mac/demo.py
deleted file mode 100644
index 52d4bd2b38b2be048568e78cc330ed3939923679..0000000000000000000000000000000000000000
--- a/spaces/CVPR/v-doc_abstractive_mac/demo.py
+++ /dev/null
@@ -1,83 +0,0 @@
-
-import json
-import os
-import werkzeug
-import tensorflow as tf
-
-from config import config, parseArgs, configPDF
-from extract_feature import get_img_feat, build_model
-from main import setSession, loadWeights, setSavers
-from model import MACnet
-from preprocess import Preprocesser
-import warnings
-
-def predict(image, question):
- parseArgs()
- config.parallel = True
- config.evalTrain = True
- config.retainVal = True
- config.useEMA = True
- config.lrReduce = True
- config.adam = True
- config.clip = True
- config.memoryVariationalDropout = True
- config.relu='ELU'
- config.encBi = True
- config.wrdEmbRandom = True
- config.wrdEmbUniform = True
- config.outQuestion = True
- config.initCtrl='Q'
- config.controlContextual = True
- config.controlInputUnshared = True
- config.readProjInputs = True
- config.readMemConcatKB = True
- config.readMemConcatProj = True
- config.readMemProj = True
- config.readCtrl = True
- config.writeMemProj = True
- config.restore = True
- config.expName = 'PDF_exp_extra'
- config.netLength = 16
- configPDF()
- with open(config.configFile(), "a+") as outFile:
- json.dump(vars(config), outFile)
-
- if config.gpus != "":
- config.gpusNum = len(config.gpus.split(","))
- os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
- tf.reset_default_graph()
- tf.Graph().as_default()
- tf.logging.set_verbosity(tf.logging.ERROR)
- cnn_model = build_model()
- imageData = get_img_feat(cnn_model, image)
-
- preprocessor = Preprocesser()
- qData, embeddings, answerDict = preprocessor.preprocessData(question)
- model = MACnet(embeddings, answerDict)
- init = tf.global_variables_initializer()
-
- savers = setSavers(model)
- saver, emaSaver = savers["saver"], savers["emaSaver"]
- sessionConfig = setSession()
-
- data = {'data': qData, 'image': imageData}
-
- with tf.Session(config=sessionConfig) as sess:
- sess.graph.finalize()
-
- # epoch = loadWeights(sess, saver, init)
- print('###############', config.weightsFile(25))
- os.system('ls -l ./weights/PDF_exp_extra')
- emaSaver.restore(sess, config.weightsFile(25))
-
- evalRes = model.runBatch(sess, data['data'], data['image'], False)
- answer = None
-
- if evalRes in ['top', 'bottom']:
- answer = 'The caption at the %s side of the object.' % evalRes
- elif evalRes in ['True', 'False']:
- answer = 'There is at least one title object in this image.'
- else:
- answer = 'This image contain %s specific object(s).' % evalRes
-
- return answer
\ No newline at end of file
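A hypothetical call to the function above; the file name and question are placeholders, and all model construction, preprocessing, and weight restoration happen inside `predict` itself.

```python
# Placeholder inputs, for illustration only.
answer = predict("doc_page.png", "How many title objects are in this image?")
print(answer)
```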
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/scripts/amg.py b/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/scripts/amg.py
deleted file mode 100644
index 594caa0f58067dda884f6b22aa4d5f9d42749127..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/scripts/amg.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import cv2 # type: ignore
-
-from SAM import SamAutomaticMaskGenerator, sam_model_registry
-
-import argparse
-import json
-import os
-from typing import Any, Dict, List
-
-parser = argparse.ArgumentParser(
- description=(
- "Runs automatic mask generation on an input image or directory of images, "
- "and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
- "as well as pycocotools if saving in RLE format."
- )
-)
-
-parser.add_argument(
- "--input",
- type=str,
- required=True,
- help="Path to either a single input image or folder of images.",
-)
-
-parser.add_argument(
- "--output",
- type=str,
- required=True,
- help=(
- "Path to the directory where masks will be output. Output will be either a folder "
- "of PNGs per image or a single json with COCO-style masks."
- ),
-)
-
-parser.add_argument(
- "--model-type",
- type=str,
- default="default",
- help="The type of model to load, in ['default', 'vit_l', 'vit_b']",
-)
-
-parser.add_argument(
- "--checkpoint",
- type=str,
- required=True,
- help="The path to the SAM checkpoint to use for mask generation.",
-)
-
-parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")
-
-parser.add_argument(
- "--convert-to-rle",
- action="store_true",
- help=(
- "Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
- "Requires pycocotools."
- ),
-)
-
-amg_settings = parser.add_argument_group("AMG Settings")
-
-amg_settings.add_argument(
- "--points-per-side",
- type=int,
- default=None,
- help="Generate masks by sampling a grid over the image with this many points to a side.",
-)
-
-amg_settings.add_argument(
- "--points-per-batch",
- type=int,
- default=None,
- help="How many input points to process simultaneously in one batch.",
-)
-
-amg_settings.add_argument(
- "--pred-iou-thresh",
- type=float,
- default=None,
- help="Exclude masks with a predicted score from the model that is lower than this threshold.",
-)
-
-amg_settings.add_argument(
- "--stability-score-thresh",
- type=float,
- default=None,
- help="Exclude masks with a stability score lower than this threshold.",
-)
-
-amg_settings.add_argument(
- "--stability-score-offset",
- type=float,
- default=None,
- help="Larger values perturb the mask more when measuring stability score.",
-)
-
-amg_settings.add_argument(
- "--box-nms-thresh",
- type=float,
- default=None,
- help="The overlap threshold for excluding a duplicate mask.",
-)
-
-amg_settings.add_argument(
- "--crop-n-layers",
- type=int,
- default=None,
- help=(
- "If >0, mask generation is run on smaller crops of the image to generate more masks. "
- "The value sets how many different scales to crop at."
- ),
-)
-
-amg_settings.add_argument(
- "--crop-nms-thresh",
- type=float,
- default=None,
- help="The overlap threshold for excluding duplicate masks across different crops.",
-)
-
-amg_settings.add_argument(
- "--crop-overlap-ratio",
- type=int,
- default=None,
- help="Larger numbers mean image crops will overlap more.",
-)
-
-amg_settings.add_argument(
- "--crop-n-points-downscale-factor",
- type=int,
- default=None,
- help="The number of points-per-side in each layer of crop is reduced by this factor.",
-)
-
-amg_settings.add_argument(
- "--min-mask-region-area",
- type=int,
- default=None,
- help=(
- "Disconnected mask regions or holes with area smaller than this value "
- "in pixels are removed by postprocessing."
- ),
-)
-
-
-def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
- header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h" # noqa
- metadata = [header]
- for i, mask_data in enumerate(masks):
- mask = mask_data["segmentation"]
- filename = f"{i}.png"
- cv2.imwrite(os.path.join(path, filename), mask * 255)
- mask_metadata = [
- str(i),
- str(mask_data["area"]),
- *[str(x) for x in mask_data["bbox"]],
- *[str(x) for x in mask_data["point_coords"][0]],
- str(mask_data["predicted_iou"]),
- str(mask_data["stability_score"]),
- *[str(x) for x in mask_data["crop_box"]],
- ]
- row = ",".join(mask_metadata)
- metadata.append(row)
- metadata_path = os.path.join(path, "metadata.csv")
- with open(metadata_path, "w") as f:
- f.write("\n".join(metadata))
-
- return
-
-
-def get_amg_kwargs(args):
- amg_kwargs = {
- "points_per_side": args.points_per_side,
- "points_per_batch": args.points_per_batch,
- "pred_iou_thresh": args.pred_iou_thresh,
- "stability_score_thresh": args.stability_score_thresh,
- "stability_score_offset": args.stability_score_offset,
- "box_nms_thresh": args.box_nms_thresh,
- "crop_n_layers": args.crop_n_layers,
- "crop_nms_thresh": args.crop_nms_thresh,
- "crop_overlap_ratio": args.crop_overlap_ratio,
- "crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
- "min_mask_region_area": args.min_mask_region_area,
- }
- amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
- return amg_kwargs
-
-
-def main(args: argparse.Namespace) -> None:
- print("Loading model...")
- sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
- _ = sam.to(device=args.device)
- output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
- amg_kwargs = get_amg_kwargs(args)
- generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)
-
- if not os.path.isdir(args.input):
- targets = [args.input]
- else:
- targets = [
- f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))
- ]
- targets = [os.path.join(args.input, f) for f in targets]
-
- os.makedirs(args.output, exist_ok=True)
-
- for t in targets:
- print(f"Processing '{t}'...")
- image = cv2.imread(t)
- if image is None:
- print(f"Could not load '{t}' as an image, skipping...")
- continue
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
- masks = generator.generate(image)
-
- base = os.path.basename(t)
- base = os.path.splitext(base)[0]
- save_base = os.path.join(args.output, base)
- if output_mode == "binary_mask":
- os.makedirs(save_base, exist_ok=False)
- write_masks_to_folder(masks, save_base)
- else:
- save_file = save_base + ".json"
- with open(save_file, "w") as f:
- json.dump(masks, f)
- print("Done!")
-
-
-if __name__ == "__main__":
- args = parser.parse_args()
- main(args)
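The script is normally driven from the command line, but its core flow reduces to the sketch below; the checkpoint file name, image path, and output directory are placeholders, and the imports follow this repository's vendored `SAM` package rather than the upstream `segment_anything` module.

```python
import os
import cv2
from SAM import SamAutomaticMaskGenerator, sam_model_registry

sam = sam_model_registry["default"](checkpoint="sam_vit_h.pth")   # placeholder path
sam.to(device="cuda")
generator = SamAutomaticMaskGenerator(sam, output_mode="binary_mask")

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = generator.generate(image)

os.makedirs("out/example", exist_ok=True)
write_masks_to_folder(masks, "out/example")   # helper defined above
```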
diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/YamlReader.js b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/YamlReader.js
deleted file mode 100644
index 4d06887a6ac23a2e1936f0d90d3a69fc3ba4189a..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/YamlReader.js
+++ /dev/null
@@ -1,83 +0,0 @@
-import fs from 'fs'
-import YAML from 'yaml'
-import _ from 'lodash'
-import chokidar from 'chokidar'
-
-export default class YamlReader {
- /**
- * 读写yaml文件
- *
- * @param yamlPath yaml文件绝对路径
- * @param isWatch 是否监听文件变化
- */
- constructor(yamlPath, isWatch = false) {
- this.yamlPath = yamlPath
- this.isWatch = isWatch
- this.initYaml()
- }
-
- initYaml() {
- // parseDocument 将会保留注释
- this.document = YAML.parseDocument(fs.readFileSync(this.yamlPath, 'utf8'))
- if (this.isWatch && !this.watcher) {
- this.watcher = chokidar.watch(this.yamlPath).on('change', () => {
- if (this.isSave) {
- this.isSave = false
- return
- }
- this.initYaml()
- })
- }
- }
-
- /** 返回读取的对象 */
- get jsonData() {
- if (!this.document) {
- return null
- }
- return this.document.toJSON()
- }
-
- /* 检查集合是否包含key的值 */
- has(keyPath) {
- return this.document.hasIn(keyPath.split('.'))
- }
-
- /* 返回key的值 */
- get(keyPath) {
- return _.get(this.jsonData, keyPath)
- }
-
- /* 修改某个key的值 */
- set(keyPath, value) {
- this.document.setIn([keyPath], value)
- this.save()
- }
-
- /* 删除key */
- delete(keyPath) {
- this.document.deleteIn(keyPath.split('.'))
- this.save()
- }
-
- // 数组添加数据
- addIn(keyPath, value) {
- this.document.addIn(keyPath.split('.'), value)
- this.save()
- }
-
- // 彻底删除某个key
- deleteKey(keyPath) {
- let keys = keyPath.split('.')
- keys = this.mapParentKeys(keys)
- this.document.deleteIn(keys)
- this.save()
- }
-
- // 保存yaml文件,写入文件
- save() {
- this.isSave = true
- let yaml = this.document.toString()
- fs.writeFileSync(this.yamlPath, yaml, 'utf8')
- }
-}
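The class relies on `YAML.parseDocument` so that comments survive a read-modify-write cycle. A rough Python analogue of the same pattern, using ruamel.yaml's round-trip mode (which also preserves comments), might look like this; the file path and key are placeholders.

```python
from ruamel.yaml import YAML

yaml = YAML()  # round-trip mode: keeps comments and key order

def set_key(path, key, value):
    with open(path, "r", encoding="utf-8") as f:
        doc = yaml.load(f)
    doc[key] = value
    with open(path, "w", encoding="utf-8") as f:
        yaml.dump(doc, f)

set_key("config.yaml", "port", 8080)   # hypothetical file and key
```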
diff --git a/spaces/CognitiveLabs/Research-Assistant/statics/README_zh.md b/spaces/CognitiveLabs/Research-Assistant/statics/README_zh.md
deleted file mode 100644
index 085e72d90959000940039aff30c11b584b3c9150..0000000000000000000000000000000000000000
--- a/spaces/CognitiveLabs/Research-Assistant/statics/README_zh.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
-
-Inspired by [gpt-researcher](https://github.com/assafelovic/gpt-researcher), this project offers an alternative way to generate research reports by using a third-party API instead of the official one. To access this third-party API, see [chimeragpt](https://chimeragpt.adventblocks.cc/) or [GPT-API-free](https://github.com/chatanywhere/GPT_API_free). Once you have obtained an API key, you can use it to access the chimeragpt API. Therefore, before running the project, make sure you set the environment variables `OPENAI_API_KEY` and `OPENAI_API_BASE`.
-
-```shell
-$ export OPENAI_API_KEY=your_api_key
-$ export OPENAI_API_BASE=your_api_base
-```
-
-Alternatively, you can set the API key and API base in the `.env` file.
-
-## Installation
-
-1. Clone the repository
-
- ```shell
- $ git clone git@github.com:paradoxtown/ai_research_assistant.git
- $ cd ai_research_assistant
- ```
-
-2. Install the dependencies
-
- ```shell
- $ pip install -r requirements.txt
- ```
-
-3. Export the environment variables
-
- ```shell
- $ export OPENAI_API_KEY=your_api_key
- $ export OPENAI_API_BASE=your_api_base
- ```
- Or modify the `.env` file.
-
-4. Run the project
-
- ```shell
- $ python app.py
- ```
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/templating.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/templating.py
deleted file mode 100644
index 0cb868486edd9dda38f90c65f314597813128cf8..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/templating.py
+++ /dev/null
@@ -1 +0,0 @@
-from starlette.templating import Jinja2Templates as Jinja2Templates # noqa
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/IconButton-abe5ede9.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/IconButton-abe5ede9.js
deleted file mode 100644
index 5988c97bd5e518e9f1ba80361d3160f9a6189b52..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/IconButton-abe5ede9.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as I,e as k,s as w,N as m,O as p,k as q,K as f,U as b,p as g,M as _,o as v,Q as S,z,v as A,A as h,x as B,P as C,R as F,F as K}from"./index-3370be2a.js";import"./Button-89624748.js";function d(l){let e,i;return{c(){e=m("span"),i=C(l[1]),f(e,"class","svelte-1030q2h")},m(a,s){g(a,e,s),_(e,i)},p(a,s){s&2&&F(i,a[1])},d(a){a&&h(e)}}}function M(l){let e,i,a,s,o,c,r,n=l[2]&&d(l);return s=new l[0]({}),{c(){e=m("button"),n&&n.c(),i=p(),a=m("div"),q(s.$$.fragment),f(a,"class","svelte-1030q2h"),f(e,"aria-label",l[1]),f(e,"title",l[1]),f(e,"class","svelte-1030q2h"),b(e,"pending",l[3])},m(t,u){g(t,e,u),n&&n.m(e,null),_(e,i),_(e,a),v(s,a,null),o=!0,c||(r=S(e,"click",l[4]),c=!0)},p(t,[u]){t[2]?n?n.p(t,u):(n=d(t),n.c(),n.m(e,i)):n&&(n.d(1),n=null),(!o||u&2)&&f(e,"aria-label",t[1]),(!o||u&2)&&f(e,"title",t[1]),(!o||u&8)&&b(e,"pending",t[3])},i(t){o||(z(s.$$.fragment,t),o=!0)},o(t){A(s.$$.fragment,t),o=!1},d(t){t&&h(e),n&&n.d(),B(s),c=!1,r()}}}function N(l,e,i){let{Icon:a}=e,{label:s=""}=e,{show_label:o=!1}=e,{pending:c=!1}=e;function r(n){K.call(this,l,n)}return l.$$set=n=>{"Icon"in n&&i(0,a=n.Icon),"label"in n&&i(1,s=n.label),"show_label"in n&&i(2,o=n.show_label),"pending"in n&&i(3,c=n.pending)},[a,s,o,c,r]}class Q extends I{constructor(e){super(),k(this,e,N,M,w,{Icon:0,label:1,show_label:2,pending:3})}}export{Q as I};
-//# sourceMappingURL=IconButton-abe5ede9.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_paths.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_paths.py
deleted file mode 100644
index 0a994bf5e93fa773148dbe0941fcdb5532fbc15a..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_paths.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# coding=utf-8
-# Copyright 2022-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Contains utilities to handle paths in Huggingface Hub."""
-from fnmatch import fnmatch
-from pathlib import Path
-from typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union
-
-
-T = TypeVar("T")
-
-IGNORE_GIT_FOLDER_PATTERNS = [".git", ".git/*", "*/.git", "**/.git/**"]
-
-
-def filter_repo_objects(
- items: Iterable[T],
- *,
- allow_patterns: Optional[Union[List[str], str]] = None,
- ignore_patterns: Optional[Union[List[str], str]] = None,
- key: Optional[Callable[[T], str]] = None,
-) -> Generator[T, None, None]:
- """Filter repo objects based on an allowlist and a denylist.
-
- Input must be a list of paths (`str` or `Path`) or a list of arbitrary objects.
- In the later case, `key` must be provided and specifies a function of one argument
- that is used to extract a path from each element in iterable.
-
- Patterns are Unix shell-style wildcards which are NOT regular expressions. See
- https://docs.python.org/3/library/fnmatch.html for more details.
-
- Args:
- items (`Iterable`):
- List of items to filter.
- allow_patterns (`str` or `List[str]`, *optional*):
- Patterns constituting the allowlist. If provided, item paths must match at
- least one pattern from the allowlist.
- ignore_patterns (`str` or `List[str]`, *optional*):
- Patterns constituting the denylist. If provided, item paths must not match
- any patterns from the denylist.
- key (`Callable[[T], str]`, *optional*):
- Single-argument function to extract a path from each item. If not provided,
- the `items` must already be `str` or `Path`.
-
- Returns:
- Filtered list of objects, as a generator.
-
- Raises:
- :class:`ValueError`:
- If `key` is not provided and items are not `str` or `Path`.
-
- Example usage with paths:
- ```python
- >>> # Filter only PDFs that are not hidden.
- >>> list(filter_repo_objects(
- ... ["aaa.PDF", "bbb.jpg", ".ccc.pdf", ".ddd.png"],
- ... allow_patterns=["*.pdf"],
- ... ignore_patterns=[".*"],
- ... ))
- ["aaa.pdf"]
- ```
-
- Example usage with objects:
- ```python
- >>> list(filter_repo_objects(
- ... [
- ... CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")
- ... CommitOperationAdd(path_or_fileobj="/tmp/bbb.jpg", path_in_repo="bbb.jpg")
- ... CommitOperationAdd(path_or_fileobj="/tmp/.ccc.pdf", path_in_repo=".ccc.pdf")
- ... CommitOperationAdd(path_or_fileobj="/tmp/.ddd.png", path_in_repo=".ddd.png")
- ... ],
- ... allow_patterns=["*.pdf"],
- ... ignore_patterns=[".*"],
- ... key=lambda x: x.repo_in_path
- ... ))
- [CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")]
- ```
- """
- if isinstance(allow_patterns, str):
- allow_patterns = [allow_patterns]
-
- if isinstance(ignore_patterns, str):
- ignore_patterns = [ignore_patterns]
-
- if key is None:
-
- def _identity(item: T) -> str:
- if isinstance(item, str):
- return item
- if isinstance(item, Path):
- return str(item)
- raise ValueError(f"Please provide `key` argument in `filter_repo_objects`: `{item}` is not a string.")
-
- key = _identity # Items must be `str` or `Path`, otherwise raise ValueError
-
- for item in items:
- path = key(item)
-
- # Skip if there's an allowlist and path doesn't match any
- if allow_patterns is not None and not any(fnmatch(path, r) for r in allow_patterns):
- continue
-
- # Skip if there's a denylist and path matches any
- if ignore_patterns is not None and any(fnmatch(path, r) for r in ignore_patterns):
- continue
-
- yield item
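Beyond the docstring examples, here is a small sketch of combining an allowlist with the module's `.git` deny patterns; the file names are made up.

```python
files = ["model/weights.bin", "logs/run.txt", ".git/config", "notes.tmp"]
kept = list(filter_repo_objects(
    files,
    allow_patterns=["model/*", "logs/*"],
    ignore_patterns=IGNORE_GIT_FOLDER_PATTERNS,
))
print(kept)   # ['model/weights.bin', 'logs/run.txt']
```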
diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/dula/layout_old.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/dula/layout_old.py
deleted file mode 100644
index 4945147000f0ed35ca627366a7c0fb96f1b33a4b..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/postprocessing/dula/layout_old.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""
-@Date: 2021/10/06
-@description: Use the approach proposed by DuLa-Net
-"""
-import cv2
-import numpy as np
-import math
-import matplotlib.pyplot as plt
-
-from visualization.floorplan import draw_floorplan
-
-
-def merge_near(lst, diag):
- group = [[0, ]]
- for i in range(1, len(lst)):
- if lst[i] - np.mean(group[-1]) < diag * 0.02:
- group[-1].append(lst[i])
- else:
- group.append([lst[i], ])
- if len(group) == 1:
- group = [lst[0], lst[-1]]
- else:
- group = [int(np.mean(x)) for x in group]
- return group
-
-
-def fit_layout_old(floor_xz, need_cube=False, show=False, block_eps=0.05):
- show_radius = np.linalg.norm(floor_xz, axis=-1).max()
- side_l = 512
- floorplan = draw_floorplan(xz=floor_xz, show_radius=show_radius, show=show, scale=1, side_l=side_l).astype(np.uint8)
- center = np.array([side_l / 2, side_l / 2])
- polys = cv2.findContours(floorplan, 1, 2)
- if isinstance(polys, tuple):
- if len(polys) == 3:
- # opencv 3
- polys = list(polys[1])
- else:
- polys = list(polys[0])
- polys.sort(key=lambda x: cv2.contourArea(x), reverse=True)
- poly = polys[0]
- sub_x, sub_y, w, h = cv2.boundingRect(poly)
- floorplan_sub = floorplan[sub_y:sub_y + h, sub_x:sub_x + w]
- sub_center = center - np.array([sub_x, sub_y])
- polys = cv2.findContours(floorplan_sub, 1, 2)
- if isinstance(polys, tuple):
- if len(polys) == 3:
- polys = polys[1]
- else:
- polys = polys[0]
- poly = polys[0]
- epsilon = 0.005 * cv2.arcLength(poly, True)
- poly = cv2.approxPolyDP(poly, epsilon, True)
-
- x_lst = [0, ]
- y_lst = [0, ]
- for i in range(len(poly)):
- p1 = poly[i][0]
- p2 = poly[(i + 1) % len(poly)][0]
-
- if (p2[0] - p1[0]) == 0:
- slope = 10
- else:
- slope = abs((p2[1] - p1[1]) / (p2[0] - p1[0]))
-
- if slope <= 1:
- s = int((p1[1] + p2[1]) / 2)
- y_lst.append(s)
- elif slope > 1:
- s = int((p1[0] + p2[0]) / 2)
- x_lst.append(s)
-
- x_lst.append(floorplan_sub.shape[1])
- y_lst.append(floorplan_sub.shape[0])
- x_lst.sort()
- y_lst.sort()
-
- diag = math.sqrt(math.pow(floorplan_sub.shape[1], 2) + math.pow(floorplan_sub.shape[0], 2))
- x_lst = merge_near(x_lst, diag)
- y_lst = merge_near(y_lst, diag)
- if need_cube and len(x_lst) > 2:
- x_lst = [x_lst[0], x_lst[-1]]
- if need_cube and len(y_lst) > 2:
- y_lst = [y_lst[0], y_lst[-1]]
-
- ans = np.zeros((floorplan_sub.shape[0], floorplan_sub.shape[1]))
- for i in range(len(x_lst) - 1):
- for j in range(len(y_lst) - 1):
- sample = floorplan_sub[y_lst[j]:y_lst[j + 1], x_lst[i]:x_lst[i + 1]]
- score = 0 if sample.size == 0 else sample.mean()
- if score >= 0.3:
- ans[y_lst[j]:y_lst[j + 1], x_lst[i]:x_lst[i + 1]] = 1
-
- pred = np.uint8(ans)
- pred_polys = cv2.findContours(pred, 1, 3)
- if isinstance(pred_polys, tuple):
- if len(pred_polys) == 3:
- pred_polys = pred_polys[1]
- else:
- pred_polys = pred_polys[0]
-
- polygon = [(p[0][1], p[0][0]) for p in pred_polys[0][::-1]]
-
- v = np.array([p[0] + sub_y for p in polygon])
- u = np.array([p[1] + sub_x for p in polygon])
- # side_l
- # v<-----------|o
- # | | |
- # | ----|----z | side_l
- # | | |
- # | x \|/
- # |------------u
- side_l = floorplan.shape[0]
- pred_xz = np.concatenate((u[:, np.newaxis] - side_l // 2, side_l // 2 - v[:, np.newaxis]), axis=1)
-
- pred_xz = pred_xz * show_radius / (side_l // 2)
- if show:
- draw_floorplan(pred_xz, show_radius=show_radius, show=show)
- return pred_xz
-
-
-if __name__ == '__main__':
- from utils.conversion import uv2xyz
-
- pano_img = np.zeros([512, 1024, 3])
- corners = np.array([[0.1, 0.7],
- [0.4, 0.7],
- [0.3, 0.6],
- [0.6, 0.6],
- [0.8, 0.7]])
- xz = uv2xyz(corners)[..., ::2]
- draw_floorplan(xz, show=True, marker_color=None, center_color=0.8)
-
- xz = fit_layout_old(xz)
- draw_floorplan(xz, show=True, marker_color=None, center_color=0.8)
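The coordinate clustering done by `merge_near` is the heart of the axis-aligned fitting: wall candidates closer than 2% of the floorplan diagonal collapse into one line. A quick hedged check with made-up pixel coordinates:

```python
import math

xs = sorted([0, 3, 5, 98, 101, 240])
diag = math.hypot(512, 512)           # ~724 px, so the merge threshold is ~14.5 px
print(merge_near(xs, diag))           # [2, 99, 240]: nearby candidates are averaged
```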
diff --git a/spaces/Datasculptor/StyleGAN-NADA/op/fused_bias_act.cpp b/spaces/Datasculptor/StyleGAN-NADA/op/fused_bias_act.cpp
deleted file mode 100644
index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/StyleGAN-NADA/op/fused_bias_act.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <torch/extension.h>
-
-
-torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
- int act, int grad, float alpha, float scale);
-
-#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
-torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
- int act, int grad, float alpha, float scale) {
- CHECK_CUDA(input);
- CHECK_CUDA(bias);
-
- return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
-}
\ No newline at end of file
diff --git a/spaces/DeepakJaiz/QA_evaluator/README.md b/spaces/DeepakJaiz/QA_evaluator/README.md
deleted file mode 100644
index f4bda15912a5a26975a7805afd5b237259e0a803..0000000000000000000000000000000000000000
--- a/spaces/DeepakJaiz/QA_evaluator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: QA Evaluator
-emoji: 👁
-colorFrom: gray
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Demosthene-OR/avr23-cds-translation/tabs/custom_vectorizer.py b/spaces/Demosthene-OR/avr23-cds-translation/tabs/custom_vectorizer.py
deleted file mode 100644
index 1130e9d684fdf994de7d27e197932bebb8bf3ca7..0000000000000000000000000000000000000000
--- a/spaces/Demosthene-OR/avr23-cds-translation/tabs/custom_vectorizer.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# The following two functions are needed so that these CountVectorizer parameters can be serialized,
-# allowing the vectorizer to be saved for later reuse without needing X_train to re-initialize it
-import tiktoken
-
-tokenizer = tiktoken.get_encoding("cl100k_base")
-
-def custom_tokenizer(text):
- global tokenizer
-
- tokens = tokenizer.encode(text) # This splits the text into tokens
- return tokens
-
-def custom_preprocessor(text):
- return text
\ No newline at end of file
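As the comment explains, these are module-level named functions precisely so the fitted vectorizer can be pickled. A hedged sketch of wiring them into scikit-learn's `CountVectorizer` follows; the sample sentences are placeholders.

```python
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(
    tokenizer=custom_tokenizer,        # tiktoken ids become the vocabulary terms
    preprocessor=custom_preprocessor,  # identity preprocessing
    token_pattern=None,                # silence the unused-regex warning
)
X = vectorizer.fit_transform(["the cat sat", "the dog ran"])
print(X.shape)
```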
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/training/coaches/base_coach.py b/spaces/DragGan/DragGan-Inversion/PTI/training/coaches/base_coach.py
deleted file mode 100644
index ccea133353df1f6b6737f9672ae7e2cb9438071d..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/training/coaches/base_coach.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import abc
-import os
-import pickle
-from argparse import Namespace
-import wandb
-import os.path
-from PTI.criteria.localitly_regulizer import Space_Regulizer
-import torch
-from torchvision import transforms
-from lpips import LPIPS
-from PTI.training.projectors import w_projector
-from PTI.configs import global_config, paths_config, hyperparameters
-from PTI.criteria import l2_loss
-from PTI.models.e4e.psp import pSp
-from PTI.utils.log_utils import log_image_from_w
-from PTI.utils.models_utils import toogle_grad, load_old_G
-
-
-class BaseCoach:
- def __init__(self, data_loader, use_wandb):
-
- self.use_wandb = use_wandb
- self.data_loader = data_loader
- self.w_pivots = {}
- self.image_counter = 0
-
- if hyperparameters.first_inv_type == 'w+':
- self.initilize_e4e()
-
- self.e4e_image_transform = transforms.Compose([
- transforms.ToPILImage(),
- transforms.Resize((256, 256)),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
- # Initialize loss
- self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(
- global_config.device).eval()
-
- self.restart_training()
-
- # Initialize checkpoint dir
- self.checkpoint_dir = paths_config.checkpoints_dir
- os.makedirs(self.checkpoint_dir, exist_ok=True)
-
- def restart_training(self):
-
- # Initialize networks
- self.G = load_old_G()
- toogle_grad(self.G, True)
-
- self.original_G = load_old_G()
-
- self.space_regulizer = Space_Regulizer(
- self.original_G, self.lpips_loss)
- self.optimizer = self.configure_optimizers()
-
- def get_inversion(self, w_path_dir, image_name, image):
- embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
- os.makedirs(embedding_dir, exist_ok=True)
-
- w_pivot = None
- if hyperparameters.use_last_w_pivots:
- w_pivot = self.load_inversions(w_path_dir, image_name)
-
- if not hyperparameters.use_last_w_pivots or w_pivot is None:
- w_pivot = self.calc_inversions(image, image_name)
- torch.save(w_pivot, f'{embedding_dir}/0.pt')
-
- w_pivot = w_pivot.to(global_config.device)
- return w_pivot
-
- def load_inversions(self, w_path_dir, image_name):
- if image_name in self.w_pivots:
- return self.w_pivots[image_name]
-
- if hyperparameters.first_inv_type == 'w+':
- w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
- else:
- w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
- if not os.path.isfile(w_potential_path):
- return None
- w = torch.load(w_potential_path).to(global_config.device)
- self.w_pivots[image_name] = w
- return w
-
- def calc_inversions(self, image, image_name):
- if hyperparameters.first_inv_type == 'w+':
- w = self.get_e4e_inversion(image)
-
- else:
- id_image = torch.squeeze(
- (image.to(global_config.device) + 1) / 2) * 255
- w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
- num_steps=hyperparameters.first_inv_steps, w_name=image_name,
- use_wandb=self.use_wandb)
-
- return w
-
- @abc.abstractmethod
- def train(self):
- pass
-
- def configure_optimizers(self):
- optimizer = torch.optim.Adam(
- self.G.parameters(), lr=hyperparameters.pti_learning_rate)
-
- return optimizer
-
- def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
- loss = 0.0
-
- if hyperparameters.pt_l2_lambda > 0:
- l2_loss_val = l2_loss.l2_loss(generated_images, real_images)
- if self.use_wandb:
- wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach(
- ).cpu()}, step=global_config.training_step)
- loss += l2_loss_val * hyperparameters.pt_l2_lambda
- if hyperparameters.pt_lpips_lambda > 0:
- loss_lpips = self.lpips_loss(generated_images, real_images)
- loss_lpips = torch.squeeze(loss_lpips)
- if self.use_wandb:
- wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach(
- ).cpu()}, step=global_config.training_step)
- loss += loss_lpips * hyperparameters.pt_lpips_lambda
-
- if use_ball_holder and hyperparameters.use_locality_regularization:
- ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(
- new_G, w_batch, use_wandb=self.use_wandb)
- loss += ball_holder_loss_val
-
- return loss, l2_loss_val, loss_lpips
-
- def forward(self, w):
- generated_images = self.G.synthesis(
- w, noise_mode='const', force_fp32=True)
-
- return generated_images
-
- def initilize_e4e(self):
- ckpt = torch.load(paths_config.e4e, map_location='cpu')
- opts = ckpt['opts']
- opts['batch_size'] = hyperparameters.train_batch_size
- opts['checkpoint_path'] = paths_config.e4e
- opts = Namespace(**opts)
- self.e4e_inversion_net = pSp(opts)
- self.e4e_inversion_net.eval()
- self.e4e_inversion_net = self.e4e_inversion_net.to(
- global_config.device)
- toogle_grad(self.e4e_inversion_net, False)
-
- def get_e4e_inversion(self, image):
- image = (image + 1) / 2
- new_image = self.e4e_image_transform(image[0]).to(global_config.device)
- _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
- input_code=False)
- if self.use_wandb:
- log_image_from_w(w, self.G, 'First e4e inversion')
- return w
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py b/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py
deleted file mode 100644
index b9cce427e5374c5ddce90199e1184f84a13d30c5..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Project given image to the latent space of pretrained network pickle."""
-
-import copy
-import wandb
-import numpy as np
-import torch
-import torch.nn.functional as F
-from tqdm import tqdm
-from configs import global_config, hyperparameters
-import dnnlib
-from utils.log_utils import log_image_from_w
-
-
-def project(
- G,
- target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
- *,
- num_steps=1000,
- w_avg_samples=10000,
- initial_learning_rate=0.01,
- initial_noise_factor=0.05,
- lr_rampdown_length=0.25,
- lr_rampup_length=0.05,
- noise_ramp_length=0.75,
- regularize_noise_weight=1e5,
- verbose=False,
- device: torch.device,
- use_wandb=False,
- initial_w=None,
- image_log_step=global_config.image_rec_result_log_snapshot,
- w_name: str
-):
- assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)
-
- def logprint(*args):
- if verbose:
- print(*args)
-
- G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore
-
- # Compute w stats.
- logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
- z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
- w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C]
- w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C]
- w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
- w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
- w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
-
- start_w = initial_w if initial_w is not None else w_avg
-
- # Setup noise inputs.
- noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}
-
- # Load VGG16 feature detector.
- url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
- with dnnlib.util.open_url(url) as f:
- vgg16 = torch.jit.load(f).eval().to(device)
-
- # Features for target image.
- target_images = target.unsqueeze(0).to(device).to(torch.float32)
- if target_images.shape[2] > 256:
- target_images = F.interpolate(target_images, size=(256, 256), mode='area')
- target_features = vgg16(target_images, resize_images=False, return_lpips=True)
-
- start_w = np.repeat(start_w, G.mapping.num_ws, axis=1)
- w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
- requires_grad=True) # pylint: disable=not-callable
-
- optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
- lr=hyperparameters.first_inv_lr)
-
- # Init noise.
- for buf in noise_bufs.values():
- buf[:] = torch.randn_like(buf)
- buf.requires_grad = True
-
- for step in tqdm(range(num_steps)):
-
- # Learning rate schedule.
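-        # Linear ramp-up over the first lr_rampup_length of the run, cosine ramp-down over the last lr_rampdown_length; the injected w noise decays quadratically to zero over noise_ramp_length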
- t = step / num_steps
- w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
- lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
- lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
- lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
- lr = initial_learning_rate * lr_ramp
- for param_group in optimizer.param_groups:
- param_group['lr'] = lr
-
- # Synth images from opt_w.
- w_noise = torch.randn_like(w_opt) * w_noise_scale
- ws = (w_opt + w_noise)
-
- synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)
-
- # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
- synth_images = (synth_images + 1) * (255 / 2)
- if synth_images.shape[2] > 256:
- synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
-
- # Features for synth images.
- synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
- dist = (target_features - synth_features).square().sum()
-
- # Noise regularization.
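-        # Penalize spatial autocorrelation of each noise map at multiple scales (average-pooled down to 8x8), as in the StyleGAN2-ADA projector this file is derived from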
- reg_loss = 0.0
- for v in noise_bufs.values():
- noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d()
- while True:
- reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2
- reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2
- if noise.shape[2] <= 8:
- break
- noise = F.avg_pool2d(noise, kernel_size=2)
- loss = dist + reg_loss * regularize_noise_weight
-
- if step % image_log_step == 0:
- with torch.no_grad():
- if use_wandb:
- global_config.training_step += 1
- wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step)
- log_image_from_w(w_opt, G, w_name)
-
- # Step
- optimizer.zero_grad(set_to_none=True)
- loss.backward()
- optimizer.step()
- logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
-
- # Normalize noise.
- with torch.no_grad():
- for buf in noise_bufs.values():
- buf -= buf.mean()
- buf *= buf.square().mean().rsqrt()
-
- del G
- return w_opt
diff --git a/spaces/Dragonnext/charybdis/greeting.md b/spaces/Dragonnext/charybdis/greeting.md
deleted file mode 100644
index 6f053fea2069febf3c36660b9740cc503bf6add3..0000000000000000000000000000000000000000
--- a/spaces/Dragonnext/charybdis/greeting.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-It will ALWAYS open every Friday at 6 PM UTC and stay open until Monday at 7 AM UTC. (No gatekeeper.)
-
-
-(A special proxy password grants access outside this time frame: additional hints soon.)
-
-Hints:
-
-All 62 unique words that trigger a timeout (including variations, e.g. test and t3st).
-
-315 Minutes
-
-SillyTavern Hivemind.
-
-https://pastebin.com/DhKk9w92
-
-Formatting: all lowercase, no spaces; the words must first be ordered alphabetically (special characters included, and they come first); Cyrillic excluded; keep only the first 200 characters and drop the rest, since the SillyTavern (ST) proxy password limit is 200.
\ No newline at end of file
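A quick way to see what the rule above produces is a small helper script. This is only a sketch of the stated rule, not part of the original Space; the word list here is hypothetical and the helper name is made up.

```python
import re

def build_proxy_password(words):
    # lowercase, drop spaces, strip Cyrillic characters (per the rule above)
    cleaned = [re.sub(r'[\u0400-\u04FF]', '', w.lower().replace(' ', '')) for w in words]
    # alphabetical order; ASCII special characters naturally sort before letters
    cleaned.sort()
    # concatenate and keep only the first 200 characters (SillyTavern proxy password limit)
    return ''.join(cleaned)[:200]

print(build_proxy_password(['Test', 't3st', '!example']))  # -> '!examplet3sttest'
```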
diff --git a/spaces/EsoCode/text-generation-webui/modules/sampler_hijack.py b/spaces/EsoCode/text-generation-webui/modules/sampler_hijack.py
deleted file mode 100644
index 391ece929e46bf4e85f10b8cfe6c76352ff114fa..0000000000000000000000000000000000000000
--- a/spaces/EsoCode/text-generation-webui/modules/sampler_hijack.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import math
-
-import torch
-import transformers
-from transformers import LogitsWarper
-from transformers.generation.logits_process import (
- LogitNormalization,
- LogitsProcessor,
- LogitsProcessorList,
- TemperatureLogitsWarper
-)
-
-
-class TailFreeLogitsWarper(LogitsWarper):
- def __init__(self, tfs: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
- tfs = float(tfs)
- if tfs < 0 or tfs > 1.0:
- raise ValueError(f"`tfs` has to be a float >= 0 and <= 1, but is {tfs}")
- self.tfs = tfs
- self.filter_value = filter_value
- self.min_tokens_to_keep = min_tokens_to_keep
-
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
- sorted_logits, sorted_indices = torch.sort(scores, descending=True)
- probs = sorted_logits.softmax(dim=-1)
-
- # Compute second derivative normalized CDF
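-        # The absolute second difference of the sorted probabilities marks where the distribution flattens into its tail; its normalized cumulative sum is the curve that `tfs` thresholds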
- d2 = probs.diff().diff().abs()
- normalized_d2 = d2 / d2.sum(dim=-1, keepdim=True)
- normalized_d2_cdf = normalized_d2.cumsum(dim=-1)
-
-        # Remove tokens with a CDF value above the threshold (the top token is always kept via the prepended zero column)
- sorted_indices_to_remove = normalized_d2_cdf > self.tfs
-
- # Centre the distribution around the cutoff as in the original implementation of the algorithm
- sorted_indices_to_remove = torch.cat(
- (
- torch.zeros(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
- sorted_indices_to_remove,
- torch.ones(scores.shape[0], 1, dtype=torch.bool, device=scores.device),
- ),
- dim=-1,
- )
-
- if self.min_tokens_to_keep > 1:
- # Keep at least min_tokens_to_keep
- sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
-
- indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
- scores = scores.masked_fill(indices_to_remove, self.filter_value)
- return scores
-
-
-class TopALogitsWarper(LogitsWarper):
- def __init__(self, top_a: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
- top_a = float(top_a)
- if top_a < 0 or top_a > 1.0:
- raise ValueError(f"`top_a` has to be a float >= 0 and <= 1, but is {top_a}")
- self.top_a = top_a
- self.filter_value = filter_value
- self.min_tokens_to_keep = min_tokens_to_keep
-
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
- sorted_logits, sorted_indices = torch.sort(scores, descending=True)
- probs = sorted_logits.softmax(dim=-1)
-
-        # Remove tokens with probability less than top_a*(max(probs))^2 (top_a = 0 keeps every token)
- probs_max = probs[..., 0, None]
- sorted_indices_to_remove = probs < probs_max * probs_max * self.top_a
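-        # e.g. with top_a = 0.2 and a maximum probability of 0.5, any token with probability below 0.2 * 0.5^2 = 0.05 is filtered out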
-
- if self.min_tokens_to_keep > 1:
- # Keep at least min_tokens_to_keep
- sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
-
- indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
- scores = scores.masked_fill(indices_to_remove, self.filter_value)
- return scores
-
-
-class MirostatLogitsWarper(LogitsWarper):
- def __init__(self, mirostat_mode: int, mirostat_tau: float, mirostat_eta: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
- if mirostat_mode not in [2]:
-            raise ValueError(f"`mirostat_mode` has to be the integer 2, but is {mirostat_mode}")
- self.mirostat_mode = mirostat_mode
- self.mirostat_eta = mirostat_eta
- self.mirostat_tau = mirostat_tau
- self.filter_value = filter_value
- self.min_tokens_to_keep = min_tokens_to_keep
- self.mu = 2 * self.mirostat_tau
- self.e = 0
-
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
- logits = scores[0]
- sorted_logits, sorted_indices = torch.sort(logits, descending=True)
- prob_original = torch.softmax(sorted_logits, dim=-1).tolist() # candidates
-
- # Truncate the words with surprise values greater than mu
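-        # The surprise of a candidate is -log2(p); any candidate more surprising than the current budget mu is dropped before sampling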
- for i, candidate in enumerate(prob_original):
- if candidate > 0 and -math.log2(candidate) > self.mu:
- if (i == 0):
- sorted_logits = sorted_logits[:1]
- else:
- sorted_logits = sorted_logits[:i]
- break
-
- # Normalize the probabilities of the remaining words
- prob_topk = torch.softmax(sorted_logits, dim=0)
-
-        prev_i = torch.multinomial(prob_topk, num_samples=1, replacement=True).to(scores.device)
-
- observed_surprise = -math.log2(prob_topk[prev_i])
- self.e = observed_surprise - self.mirostat_tau
-
- # Update mu using the learning rate and error
- self.mu -= self.mirostat_eta * self.e
-
- sorted_indices_to_remove = torch.ones_like(scores[0], dtype=torch.bool)
- sorted_indices_to_remove[prev_i] = False
-
- indices_to_remove = sorted_indices_to_remove.unsqueeze(0).scatter(1, sorted_indices.unsqueeze(0), sorted_indices_to_remove.unsqueeze(0))
- scores = scores.masked_fill(indices_to_remove, self.filter_value)
- return scores
-
-
-class RepetitionPenaltyLogitsProcessorWithRange(LogitsProcessor):
- '''
- Copied from the transformers library
- '''
- def __init__(self, penalty: float, _range: int):
- if not isinstance(penalty, float) or not (penalty > 0):
- raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
-
- self.penalty = penalty
- self._range = _range
-
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
-
- input_ids = input_ids[:, -self._range:]
- score = torch.gather(scores, 1, input_ids)
-
-        # if score < 0, multiply by the penalty (rather than divide) so the previous token's probability is still reduced
- score = torch.where(score < 0, score * self.penalty, score / self.penalty)
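-        # e.g. with penalty = 1.2, a previously seen token's logit of 2.0 shrinks to about 1.67, while a logit of -2.0 drops to -2.4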
-
- scores.scatter_(1, input_ids, score)
- return scores
-
-
-def get_logits_warper_patch(self, generation_config):
- warpers = self._get_logits_warper_old(generation_config)
- warpers_to_add = LogitsProcessorList()
- min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1
-
- if generation_config.mirostat_mode is not None and generation_config.mirostat_mode == 2:
- warpers_to_add.append(MirostatLogitsWarper(mirostat_mode=generation_config.mirostat_mode, mirostat_eta=generation_config.mirostat_eta, mirostat_tau=generation_config.mirostat_tau, min_tokens_to_keep=min_tokens_to_keep))
- # We need to disable samplers other than temperature
- for warper in warpers:
- if not isinstance(warper, TemperatureLogitsWarper):
- warpers.remove(warper)
- else:
- if generation_config.tfs is not None and 0.0 <= generation_config.tfs <= 1.0:
- warpers_to_add.append(TailFreeLogitsWarper(tfs=generation_config.tfs, min_tokens_to_keep=min_tokens_to_keep))
- if generation_config.top_a is not None and 0.0 <= generation_config.top_a <= 1.0:
- warpers_to_add.append(TopALogitsWarper(top_a=generation_config.top_a, min_tokens_to_keep=min_tokens_to_keep))
-
- if warpers and isinstance(warpers[-1], LogitNormalization):
- warpers = warpers[:-1] + warpers_to_add + [warpers[-1]]
- else:
- warpers += warpers_to_add
-
- return warpers
-
-
-def get_logits_processor_patch(self, **kwargs):
- result = self._get_logits_processor_old(**kwargs)
- repetition_penalty_range = kwargs['generation_config'].repetition_penalty_range
- repetition_penalty = kwargs['generation_config'].repetition_penalty
-
- if repetition_penalty_range > 0:
- for i in range(len(result)):
- if result[i].__class__.__name__ == 'RepetitionPenaltyLogitsProcessor':
- result[i] = RepetitionPenaltyLogitsProcessorWithRange(repetition_penalty, repetition_penalty_range)
-
- return result
-
-
-def generation_config_init_patch(self, **kwargs):
- self.__init___old(**kwargs)
- self.tfs = kwargs.pop("tfs", 1.0)
- self.top_a = kwargs.pop("top_a", 0.0)
- self.mirostat_mode = kwargs.pop("mirostat_mode", 0)
- self.mirostat_eta = kwargs.pop("mirostat_eta", 0.1)
- self.mirostat_tau = kwargs.pop("mirostat_tau", 5)
- self.repetition_penalty_range = kwargs.pop("repetition_penalty_range", 0)
-
-
-def hijack_samplers():
- transformers.GenerationMixin._get_logits_warper_old = transformers.GenerationMixin._get_logits_warper
- transformers.GenerationMixin._get_logits_warper = get_logits_warper_patch
-
- transformers.GenerationMixin._get_logits_processor_old = transformers.GenerationMixin._get_logits_processor
- transformers.GenerationMixin._get_logits_processor = get_logits_processor_patch
-
- transformers.GenerationConfig.__init___old = transformers.GenerationConfig.__init__
- transformers.GenerationConfig.__init__ = generation_config_init_patch
diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py b/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py
deleted file mode 100644
index 5feb0c61ff2738338527e1aceaa569051a655cf8..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py
+++ /dev/null
@@ -1,33 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/det_models/ocr_mask_rcnn_r50_fpn_ohem.py',
- '../../_base_/schedules/schedule_sgd_160e.py',
- '../../_base_/det_datasets/icdar2015.py',
- '../../_base_/det_pipelines/maskrcnn_pipeline.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline = {{_base_.train_pipeline}}
-test_pipeline_icdar2015 = {{_base_.test_pipeline_icdar2015}}
-
-data = dict(
- samples_per_gpu=8,
- workers_per_gpu=4,
- val_dataloader=dict(samples_per_gpu=1),
- test_dataloader=dict(samples_per_gpu=1),
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_icdar2015),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline_icdar2015))
-
-evaluation = dict(interval=10, metric='hmean-iou')
diff --git a/spaces/Evanell/Venus/README.md b/spaces/Evanell/Venus/README.md
deleted file mode 100644
index 558ec08e296a4f8c8e96289d7128fcc8e74243b3..0000000000000000000000000000000000000000
--- a/spaces/Evanell/Venus/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Venus
-emoji: ⚡
-colorFrom: gray
-colorTo: green
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/FourthBrainGenAI/DeepLearningAIDemoChatBot/app.py b/spaces/FourthBrainGenAI/DeepLearningAIDemoChatBot/app.py
deleted file mode 100644
index 660575bdc04e7159c4d82d823a1de24484405b71..0000000000000000000000000000000000000000
--- a/spaces/FourthBrainGenAI/DeepLearningAIDemoChatBot/app.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import torch
-from peft import PeftModel
-from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
-import datetime
-import os
-from threading import Event, Thread
-from uuid import uuid4
-import gradio as gr
-import requests
-
-model_name = "decapoda-research/llama-13b-hf"
-adapters_name = 'timdettmers/guanaco-13b'
-
-print(f"Starting to load the model {model_name} into memory")
-
-model = AutoModelForCausalLM.from_pretrained(
- model_name,
- load_in_4bit=True,
- torch_dtype=torch.bfloat16,
- device_map={"": 0}
-)
-
-model = PeftModel.from_pretrained(model, adapters_name)
-tokenizer = LlamaTokenizer.from_pretrained(model_name)
-tokenizer.bos_token_id = 1
-stop_token_ids = [0]
-
-max_new_tokens = 2048
-
-start_message = """A chat between a human user and a kind AI. The assistant gives helpful, cordial, and polite answers to the user's questions."""
-
-class StopOnTokens(StoppingCriteria):
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
- for stop_id in stop_token_ids:
- if input_ids[0][-1] == stop_id:
- return True
- return False
-
-
-def convert_history_to_text(history):
- text = start_message + "".join(
- [
- "".join(
- [
- f"### Human: {item[0]}\n",
- f"### Assistant: {item[1]}\n",
- ]
- )
- for item in history[:-1]
- ]
- )
- text += "".join(
- [
- "".join(
- [
- f"### Human: {history[-1][0]}\n",
- f"### Assistant: {history[-1][1]}\n",
- ]
- )
- ]
- )
- return text
-
-
-def log_conversation(conversation_id, history, messages, generate_kwargs):
- logging_url = os.getenv("LOGGING_URL", None)
- if logging_url is None:
- return
-
- timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
-
- data = {
- "conversation_id": conversation_id,
- "timestamp": timestamp,
- "history": history,
- "messages": messages,
- "generate_kwargs": generate_kwargs,
- }
-
- try:
- requests.post(logging_url, json=data)
- except requests.exceptions.RequestException as e:
- print(f"Error logging conversation: {e}")
-
-
-def user(message, history):
- # Append the user's message to the conversation history
- return "", history + [[message, ""]]
-
-
-def bot(history, temperature, top_p, top_k, repetition_penalty, conversation_id):
- print(f"history: {history}")
- # Initialize a StopOnTokens object
- stop = StopOnTokens()
-
- # Construct the input message string for the model by concatenating the current system message and conversation history
- messages = convert_history_to_text(history)
-
- # Tokenize the messages string
- input_ids = tokenizer(messages, return_tensors="pt").input_ids
- input_ids = input_ids.to(model.device)
- streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
- generate_kwargs = dict(
- input_ids=input_ids,
- max_new_tokens=max_new_tokens,
- temperature=temperature,
- do_sample=temperature > 0.0,
- top_p=top_p,
- top_k=top_k,
- repetition_penalty=repetition_penalty,
- streamer=streamer,
- stopping_criteria=StoppingCriteriaList([stop]),
- )
-
- stream_complete = Event()
-
- def generate_and_signal_complete():
- model.generate(**generate_kwargs)
- stream_complete.set()
-
- def log_after_stream_complete():
- stream_complete.wait()
- log_conversation(
- conversation_id,
- history,
- messages,
- {
- "top_k": top_k,
- "top_p": top_p,
- "temperature": temperature,
- "repetition_penalty": repetition_penalty,
- },
- )
-
- t1 = Thread(target=generate_and_signal_complete)
- t1.start()
-
- t2 = Thread(target=log_after_stream_complete)
- t2.start()
-
- # Initialize an empty string to store the generated text
- partial_text = ""
- for new_text in streamer:
- partial_text += new_text
- history[-1][1] = partial_text
- yield history
-
-
-def get_uuid():
- return str(uuid4())
-
-
-with gr.Blocks(
- theme=gr.themes.Soft(),
- css=".disclaimer {font-variant-caps: all-small-caps;}",
-) as demo:
- conversation_id = gr.State(get_uuid)
- gr.Markdown(
- """
-FourthBrain DeepLearningAI ChatBot Demo
-"""
- )
- chatbot = gr.Chatbot().style(height=500)
- with gr.Row():
- with gr.Column():
- msg = gr.Textbox(
- label="Chat Message Box",
- placeholder="Chat Message Box",
- show_label=False,
- ).style(container=False)
- with gr.Column():
- with gr.Row():
- submit = gr.Button("Submit")
- stop = gr.Button("Stop")
- clear = gr.Button("Clear")
- with gr.Row():
- with gr.Accordion("Advanced Options:", open=False):
- with gr.Row():
- with gr.Column():
- with gr.Row():
- temperature = gr.Slider(
- label="Temperature",
- value=0.7,
- minimum=0.0,
- maximum=1.0,
- step=0.1,
- interactive=True,
- info="Higher values produce more diverse outputs",
- )
- with gr.Column():
- with gr.Row():
- top_p = gr.Slider(
- label="Top-p (nucleus sampling)",
- value=0.9,
- minimum=0.0,
- maximum=1,
- step=0.01,
- interactive=True,
- info=(
- "Sample from the smallest possible set of tokens whose cumulative probability "
- "exceeds top_p. Set to 1 to disable and sample from all tokens."
- ),
- )
- with gr.Column():
- with gr.Row():
- top_k = gr.Slider(
- label="Top-k",
- value=0,
- minimum=0.0,
- maximum=200,
- step=1,
- interactive=True,
- info="Sample from a shortlist of top-k tokens — 0 to disable and sample from all tokens.",
- )
- with gr.Column():
- with gr.Row():
- repetition_penalty = gr.Slider(
- label="Repetition Penalty",
- value=1.1,
- minimum=1.0,
- maximum=2.0,
- step=0.1,
- interactive=True,
- info="Penalize repetition — 1.0 to disable.",
- )
- with gr.Row():
- gr.Markdown(
- "Disclaimer: The model can produce factually incorrect output, and should not be relied on to produce "
- "factually accurate information. The model was trained on various public datasets; while great efforts "
- "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
- "biased, or otherwise offensive outputs.",
- elem_classes=["disclaimer"],
- )
-
- submit_event = msg.submit(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).then(
- fn=bot,
- inputs=[
- chatbot,
- temperature,
- top_p,
- top_k,
- repetition_penalty,
- conversation_id,
- ],
- outputs=chatbot,
- queue=True,
- )
- submit_click_event = submit.click(
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).then(
- fn=bot,
- inputs=[
- chatbot,
- temperature,
- top_p,
- top_k,
- repetition_penalty,
- conversation_id,
- ],
- outputs=chatbot,
- queue=True,
- )
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[submit_event, submit_click_event],
- queue=False,
- )
- clear.click(lambda: None, None, chatbot, queue=False)
-
-demo.queue(max_size=128, concurrency_count=2)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/FoxMeo/fire-detector/train.py b/spaces/FoxMeo/fire-detector/train.py
deleted file mode 100644
index 86c7e48d5ac214ad4a4c0a63b924d7ece429211c..0000000000000000000000000000000000000000
--- a/spaces/FoxMeo/fire-detector/train.py
+++ /dev/null
@@ -1,705 +0,0 @@
-import argparse
-import logging
-import math
-import os
-import random
-import time
-from copy import deepcopy
-from pathlib import Path
-from threading import Thread
-
-import numpy as np
-import torch.distributed as dist
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-import torch.optim.lr_scheduler as lr_scheduler
-import torch.utils.data
-import yaml
-from torch.cuda import amp
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.utils.tensorboard import SummaryWriter
-from tqdm import tqdm
-
-import test # import test.py to get mAP after each epoch
-from models.experimental import attempt_load
-from models.yolo import Model
-from utils.autoanchor import check_anchors
-from utils.datasets import create_dataloader
-from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
- fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
- check_requirements, print_mutation, set_logging, one_cycle, colorstr
-from utils.google_utils import attempt_download
-from utils.loss import ComputeLoss, ComputeLossOTA
-from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
-from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
-from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
-
-logger = logging.getLogger(__name__)
-
-
-def train(hyp, opt, device, tb_writer=None):
- logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
- save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \
- Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze
-
- # Directories
- wdir = save_dir / 'weights'
- wdir.mkdir(parents=True, exist_ok=True) # make dir
- last = wdir / 'last.pt'
- best = wdir / 'best.pt'
- results_file = save_dir / 'results.txt'
-
- # Save run settings
- with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.dump(hyp, f, sort_keys=False)
- with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.dump(vars(opt), f, sort_keys=False)
-
- # Configure
- plots = not opt.evolve # create plots
- cuda = device.type != 'cpu'
- init_seeds(2 + rank)
- with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
- is_coco = opt.data.endswith('coco.yaml')
-
- # Logging- Doing this before checking the dataset. Might update data_dict
- loggers = {'wandb': None} # loggers dict
- if rank in [-1, 0]:
- opt.hyp = hyp # add hyperparameters
- run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
- wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
- loggers['wandb'] = wandb_logger.wandb
- data_dict = wandb_logger.data_dict
- if wandb_logger.wandb:
- weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
-
- nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
- names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
- assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
-
- # Model
- pretrained = weights.endswith('.pt')
- if pretrained:
- with torch_distributed_zero_first(rank):
- attempt_download(weights) # download if not found locally
- ckpt = torch.load(weights, map_location=device) # load checkpoint
- model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
- exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
- state_dict = ckpt['model'].float().state_dict() # to FP32
- state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
- model.load_state_dict(state_dict, strict=False) # load
- logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
- else:
- model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
- with torch_distributed_zero_first(rank):
- check_dataset(data_dict) # check
- train_path = data_dict['train']
- test_path = data_dict['val']
-
- # Freeze
- freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial)
- for k, v in model.named_parameters():
- v.requires_grad = True # train all layers
- if any(x in k for x in freeze):
- print('freezing %s' % k)
- v.requires_grad = False
-
- # Optimizer
- nbs = 64 # nominal batch size
- accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
- hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
- logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
-
- pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
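-    # pg0: norm weights plus implicit/attention/re-param tensors (no weight decay), pg1: conv and linear weights (decayed), pg2: biases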
- for k, v in model.named_modules():
- if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
- pg2.append(v.bias) # biases
- if isinstance(v, nn.BatchNorm2d):
- pg0.append(v.weight) # no decay
- elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
- pg1.append(v.weight) # apply decay
- if hasattr(v, 'im'):
- if hasattr(v.im, 'implicit'):
- pg0.append(v.im.implicit)
- else:
- for iv in v.im:
- pg0.append(iv.implicit)
- if hasattr(v, 'imc'):
- if hasattr(v.imc, 'implicit'):
- pg0.append(v.imc.implicit)
- else:
- for iv in v.imc:
- pg0.append(iv.implicit)
- if hasattr(v, 'imb'):
- if hasattr(v.imb, 'implicit'):
- pg0.append(v.imb.implicit)
- else:
- for iv in v.imb:
- pg0.append(iv.implicit)
- if hasattr(v, 'imo'):
- if hasattr(v.imo, 'implicit'):
- pg0.append(v.imo.implicit)
- else:
- for iv in v.imo:
- pg0.append(iv.implicit)
- if hasattr(v, 'ia'):
- if hasattr(v.ia, 'implicit'):
- pg0.append(v.ia.implicit)
- else:
- for iv in v.ia:
- pg0.append(iv.implicit)
- if hasattr(v, 'attn'):
- if hasattr(v.attn, 'logit_scale'):
- pg0.append(v.attn.logit_scale)
- if hasattr(v.attn, 'q_bias'):
- pg0.append(v.attn.q_bias)
- if hasattr(v.attn, 'v_bias'):
- pg0.append(v.attn.v_bias)
- if hasattr(v.attn, 'relative_position_bias_table'):
- pg0.append(v.attn.relative_position_bias_table)
- if hasattr(v, 'rbr_dense'):
- if hasattr(v.rbr_dense, 'weight_rbr_origin'):
- pg0.append(v.rbr_dense.weight_rbr_origin)
- if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
- pg0.append(v.rbr_dense.weight_rbr_avg_conv)
- if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
- pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
- if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
- pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
- if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
- pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
- if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
- pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
- if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
- pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
- if hasattr(v.rbr_dense, 'vector'):
- pg0.append(v.rbr_dense.vector)
-
- if opt.adam:
- optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
- else:
- optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
-
- optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
- optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
- logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
- del pg0, pg1, pg2
-
- # Scheduler https://arxiv.org/pdf/1812.01187.pdf
- # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- if opt.linear_lr:
- lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
- else:
- lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
- # plot_lr_scheduler(optimizer, scheduler, epochs)
-
- # EMA
- ema = ModelEMA(model) if rank in [-1, 0] else None
-
- # Resume
- start_epoch, best_fitness = 0, 0.0
- if pretrained:
- # Optimizer
- if ckpt['optimizer'] is not None:
- optimizer.load_state_dict(ckpt['optimizer'])
- best_fitness = ckpt['best_fitness']
-
- # EMA
- if ema and ckpt.get('ema'):
- ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
- ema.updates = ckpt['updates']
-
- # Results
- if ckpt.get('training_results') is not None:
- results_file.write_text(ckpt['training_results']) # write results.txt
-
- # Epochs
- start_epoch = ckpt['epoch'] + 1
- if opt.resume:
- assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
- if epochs < start_epoch:
- logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
- (weights, ckpt['epoch'], epochs))
- epochs += ckpt['epoch'] # finetune additional epochs
-
- del ckpt, state_dict
-
- # Image sizes
- gs = max(int(model.stride.max()), 32) # grid size (max stride)
- nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
- imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
-
- # DP mode
- if cuda and rank == -1 and torch.cuda.device_count() > 1:
- model = torch.nn.DataParallel(model)
-
- # SyncBatchNorm
- if opt.sync_bn and cuda and rank != -1:
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
- logger.info('Using SyncBatchNorm()')
-
- # Trainloader
- dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
- hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
- world_size=opt.world_size, workers=opt.workers,
- image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
- mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
- nb = len(dataloader) # number of batches
- assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
-
- # Process 0
- if rank in [-1, 0]:
- testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
- hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
- world_size=opt.world_size, workers=opt.workers,
- pad=0.5, prefix=colorstr('val: '))[0]
-
- if not opt.resume:
- labels = np.concatenate(dataset.labels, 0)
- c = torch.tensor(labels[:, 0]) # classes
- # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
- # model._initialize_biases(cf.to(device))
- if plots:
- #plot_labels(labels, names, save_dir, loggers)
- if tb_writer:
- tb_writer.add_histogram('classes', c, 0)
-
- # Anchors
- if not opt.noautoanchor:
- check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
- model.half().float() # pre-reduce anchor precision
-
- # DDP mode
- if cuda and rank != -1:
- model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
- # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
- find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
-
- # Model parameters
- hyp['box'] *= 3. / nl # scale to layers
- hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
- hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
- hyp['label_smoothing'] = opt.label_smoothing
- model.nc = nc # attach number of classes to model
- model.hyp = hyp # attach hyperparameters to model
- model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
- model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
- model.names = names
-
- # Start training
- t0 = time.time()
- nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
- # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
- maps = np.zeros(nc) # mAP per class
- results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
- scheduler.last_epoch = start_epoch - 1 # do not move
- scaler = amp.GradScaler(enabled=cuda)
- compute_loss_ota = ComputeLossOTA(model) # init loss class
- compute_loss = ComputeLoss(model) # init loss class
- logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
- f'Using {dataloader.num_workers} dataloader workers\n'
- f'Logging results to {save_dir}\n'
- f'Starting training for {epochs} epochs...')
- torch.save(model, wdir / 'init.pt')
- for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
- model.train()
-
- # Update image weights (optional)
- if opt.image_weights:
- # Generate indices
- if rank in [-1, 0]:
- cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
- iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
- dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
- # Broadcast if DDP
- if rank != -1:
- indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
- dist.broadcast(indices, 0)
- if rank != 0:
- dataset.indices = indices.cpu().numpy()
-
- # Update mosaic border
- # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
- # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
-
- mloss = torch.zeros(4, device=device) # mean losses
- if rank != -1:
- dataloader.sampler.set_epoch(epoch)
- pbar = enumerate(dataloader)
- logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
- if rank in [-1, 0]:
- pbar = tqdm(pbar, total=nb) # progress bar
- optimizer.zero_grad()
- for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
- ni = i + nb * epoch # number integrated batches (since train start)
- imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
-
- # Warmup
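-            # Over the first nw iterations: gradient accumulation grows toward nbs / total_batch_size, bias lrs fall from warmup_bias_lr while other lrs rise from 0, and momentum ramps from warmup_momentum to its final value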
- if ni <= nw:
- xi = [0, nw] # x interp
- # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
- accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
- for j, x in enumerate(optimizer.param_groups):
- # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
- x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
- if 'momentum' in x:
- x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
-
- # Multi-scale
- if opt.multi_scale:
- sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
- sf = sz / max(imgs.shape[2:]) # scale factor
- if sf != 1:
- ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
- imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
-
- # Forward
- with amp.autocast(enabled=cuda):
- pred = model(imgs) # forward
- if 'loss_ota' not in hyp or hyp['loss_ota'] == 1:
- loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs) # loss scaled by batch_size
- else:
- loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
- if rank != -1:
- loss *= opt.world_size # gradient averaged between devices in DDP mode
- if opt.quad:
- loss *= 4.
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize
- if ni % accumulate == 0:
- scaler.step(optimizer) # optimizer.step
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
-
- # Print
- if rank in [-1, 0]:
- mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
- mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
- s = ('%10s' * 2 + '%10.4g' * 6) % (
- '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
- pbar.set_description(s)
-
- # Plot
- if plots and ni < 10:
- f = save_dir / f'train_batch{ni}.jpg' # filename
- Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
- # if tb_writer:
- # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
- # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph
- elif plots and ni == 10 and wandb_logger.wandb:
- wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
- save_dir.glob('train*.jpg') if x.exists()]})
-
- # end batch ------------------------------------------------------------------------------------------------
- # end epoch ----------------------------------------------------------------------------------------------------
-
- # Scheduler
- lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
- scheduler.step()
-
- # DDP process 0 or single-GPU
- if rank in [-1, 0]:
- # mAP
- ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
- final_epoch = epoch + 1 == epochs
- if not opt.notest or final_epoch: # Calculate mAP
- wandb_logger.current_epoch = epoch + 1
- results, maps, times = test.test(data_dict,
- batch_size=batch_size * 2,
- imgsz=imgsz_test,
- model=ema.ema,
- single_cls=opt.single_cls,
- dataloader=testloader,
- save_dir=save_dir,
- verbose=nc < 50 and final_epoch,
- plots=plots and final_epoch,
- wandb_logger=wandb_logger,
- compute_loss=compute_loss,
- is_coco=is_coco,
- v5_metric=opt.v5_metric)
-
- # Write
- with open(results_file, 'a') as f:
- f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
- if len(opt.name) and opt.bucket:
- os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
-
- # Log
- tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
- 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
- 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
- 'x/lr0', 'x/lr1', 'x/lr2'] # params
- for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
- if tb_writer:
- tb_writer.add_scalar(tag, x, epoch) # tensorboard
- if wandb_logger.wandb:
- wandb_logger.log({tag: x}) # W&B
-
- # Update best mAP
- fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
- if fi > best_fitness:
- best_fitness = fi
- wandb_logger.end_epoch(best_result=best_fitness == fi)
-
- # Save model
- if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
- ckpt = {'epoch': epoch,
- 'best_fitness': best_fitness,
- 'training_results': results_file.read_text(),
- 'model': deepcopy(model.module if is_parallel(model) else model).half(),
- 'ema': deepcopy(ema.ema).half(),
- 'updates': ema.updates,
- 'optimizer': optimizer.state_dict(),
- 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fi:
- torch.save(ckpt, best)
- if (best_fitness == fi) and (epoch >= 200):
- torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch))
- if epoch == 0:
- torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
- elif ((epoch+1) % 25) == 0:
- torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
- elif epoch >= (epochs-5):
- torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
- if wandb_logger.wandb:
- if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
- wandb_logger.log_model(
- last.parent, opt, epoch, fi, best_model=best_fitness == fi)
- del ckpt
-
- # end epoch ----------------------------------------------------------------------------------------------------
- # end training
- if rank in [-1, 0]:
- # Plots
- if plots:
- plot_results(save_dir=save_dir) # save as results.png
- if wandb_logger.wandb:
- files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
- wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
- if (save_dir / f).exists()]})
- # Test best.pt
- logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
- if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
-            for m in (last, best) if best.exists() else (last,):  # speed, mAP tests
- results, _, _ = test.test(opt.data,
- batch_size=batch_size * 2,
- imgsz=imgsz_test,
- conf_thres=0.001,
- iou_thres=0.7,
- model=attempt_load(m, device).half(),
- single_cls=opt.single_cls,
- dataloader=testloader,
- save_dir=save_dir,
- save_json=True,
- plots=False,
- is_coco=is_coco,
- v5_metric=opt.v5_metric)
-
- # Strip optimizers
- final = best if best.exists() else last # final model
- for f in last, best:
- if f.exists():
- strip_optimizer(f) # strip optimizers
- if opt.bucket:
- os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
- if wandb_logger.wandb and not opt.evolve: # Log the stripped model
- wandb_logger.wandb.log_artifact(str(final), type='model',
- name='run_' + wandb_logger.wandb_run.id + '_model',
- aliases=['last', 'best', 'stripped'])
- wandb_logger.finish_run()
- else:
- dist.destroy_process_group()
- torch.cuda.empty_cache()
- return results
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path')
- parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
- parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
- parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path')
- parser.add_argument('--epochs', type=int, default=300)
- parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
- parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
- parser.add_argument('--rect', action='store_true', help='rectangular training')
- parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
- parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
- parser.add_argument('--notest', action='store_true', help='only test final epoch')
- parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
- parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
- parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
- parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
- parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
- parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
- parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
- parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
- parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
- parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
- parser.add_argument('--project', default='runs/train', help='save to project/name')
- parser.add_argument('--entity', default=None, help='W&B entity')
- parser.add_argument('--name', default='exp', help='save to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
- parser.add_argument('--quad', action='store_true', help='quad dataloader')
- parser.add_argument('--linear-lr', action='store_true', help='linear LR')
- parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
- parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
- parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
- parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
- parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
- parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone of yolov7=50, first3=0 1 2')
- parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
- opt = parser.parse_args()
-
- # Set DDP variables
- opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
- opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
- set_logging(opt.global_rank)
- #if opt.global_rank in [-1, 0]:
- # check_git_status()
- # check_requirements()
-
- # Resume
- wandb_run = check_wandb_resume(opt)
- if opt.resume and not wandb_run: # resume an interrupted run
- ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
- assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
- apriori = opt.global_rank, opt.local_rank
- with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
- opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
- opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
- logger.info('Resuming training from %s' % ckpt)
- else:
- # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
- opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
- assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
- opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
- opt.name = 'evolve' if opt.evolve else opt.name
- opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
-
- # DDP mode
- opt.total_batch_size = opt.batch_size
- device = select_device(opt.device, batch_size=opt.batch_size)
- if opt.local_rank != -1:
- assert torch.cuda.device_count() > opt.local_rank
- torch.cuda.set_device(opt.local_rank)
- device = torch.device('cuda', opt.local_rank)
- dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
- assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
- opt.batch_size = opt.total_batch_size // opt.world_size
-
- # Hyperparameters
- with open(opt.hyp) as f:
- hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
-
- # Train
- logger.info(opt)
- if not opt.evolve:
- tb_writer = None # init loggers
- if opt.global_rank in [-1, 0]:
- prefix = colorstr('tensorboard: ')
- logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
- tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
- train(hyp, opt, device, tb_writer)
-
- # Evolve hyperparameters (optional)
- else:
- # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
- meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
- 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
- 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
- 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
- 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
- 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
- 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
- 'box': (1, 0.02, 0.2), # box loss gain
- 'cls': (1, 0.2, 4.0), # cls loss gain
- 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
- 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
- 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
- 'iou_t': (0, 0.1, 0.7), # IoU training threshold
- 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
- 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
- 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
- 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
- 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
- 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
- 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
- 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
- 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
- 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
- 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
- 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
- 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
-                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
- 'mixup': (1, 0.0, 1.0), # image mixup (probability)
- 'copy_paste': (1, 0.0, 1.0), # segment copy-paste (probability)
- 'paste_in': (1, 0.0, 1.0)} # segment copy-paste (probability)
-
- with open(opt.hyp, errors='ignore') as f:
- hyp = yaml.safe_load(f) # load hyps dict
- if 'anchors' not in hyp: # anchors commented in hyp.yaml
- hyp['anchors'] = 3
-
- assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
- opt.notest, opt.nosave = True, True # only test/save final epoch
- # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
- yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
- if opt.bucket:
- os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
-
- for _ in range(300): # generations to evolve
- if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
- # Select parent(s)
- parent = 'single' # parent selection method: 'single' or 'weighted'
- x = np.loadtxt('evolve.txt', ndmin=2)
- n = min(5, len(x)) # number of previous results to consider
- x = x[np.argsort(-fitness(x))][:n] # top n mutations
- w = fitness(x) - fitness(x).min() # weights
- if parent == 'single' or len(x) == 1:
- # x = x[random.randint(0, n - 1)] # random selection
- x = x[random.choices(range(n), weights=w)[0]] # weighted selection
- elif parent == 'weighted':
- x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
-
- # Mutate
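-                # Each gain in meta scales a random multiplicative perturbation clipped to [0.3, 3.0]; a gain of 0 leaves that hyperparameter untouched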
- mp, s = 0.8, 0.2 # mutation probability, sigma
- npr = np.random
- npr.seed(int(time.time()))
- g = np.array([x[0] for x in meta.values()]) # gains 0-1
- ng = len(meta)
- v = np.ones(ng)
- while all(v == 1): # mutate until a change occurs (prevent duplicates)
- v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
- for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
- hyp[k] = float(x[i + 7] * v[i]) # mutate
-
- # Constrain to limits
- for k, v in meta.items():
- hyp[k] = max(hyp[k], v[1]) # lower limit
- hyp[k] = min(hyp[k], v[2]) # upper limit
- hyp[k] = round(hyp[k], 5) # significant digits
-
- # Train mutation
- results = train(hyp.copy(), opt, device)
-
- # Write mutation results
- print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
-
- # Plot results
- plot_evolution(yaml_file)
- print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
- f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec256L12_Onnx.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec256L12_Onnx.py
deleted file mode 100644
index 9ad5085e02654fd1fcfbdad7d476bfa9b763d2c6..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/ContentVec256L12_Onnx.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from vencoder.encoder import SpeechEncoder
-import onnxruntime
-import torch
-
-class ContentVec256L12_Onnx(SpeechEncoder):
- def __init__(self,vec_path = "pretrain/vec-256-layer-12.onnx",device=None):
- print("load model(s) from {}".format(vec_path))
- self.hidden_dim = 256
- if device is None:
- self.dev = torch.device("cpu")
- else:
- self.dev = torch.device(device)
- if device == 'cpu' or device == torch.device("cpu") or device is None:
- providers = ['CPUExecutionProvider']
- elif device == 'cuda' or device == torch.device("cuda"):
- providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def encoder(self, wav):
- feats = wav
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- feats = feats.unsqueeze(0).cpu().detach().numpy()
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)
- return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
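
A minimal usage sketch for the ONNX ContentVec wrapper above, assuming the `pretrain/vec-256-layer-12.onnx` file named in its constructor is available locally; input is a mono waveform tensor and, per the `encoder()` code, the output is shaped `(1, 256, frames)`:

```python
import torch
from vencoder.ContentVec256L12_Onnx import ContentVec256L12_Onnx  # module path as in the deleted file

encoder = ContentVec256L12_Onnx(vec_path="pretrain/vec-256-layer-12.onnx", device="cpu")

wav = torch.zeros(16000)      # hypothetical 1-second, 16 kHz mono waveform
feats = encoder.encoder(wav)  # note: the extraction method is named encoder(), not forward()
print(feats.shape)            # expected: torch.Size([1, 256, frames])
```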
diff --git a/spaces/GeorgeOrville/bingo/src/components/external-link.tsx b/spaces/GeorgeOrville/bingo/src/components/external-link.tsx
deleted file mode 100644
index 011265f364d5a64a770f4c7e9c65c5ade21d623a..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/external-link.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-export function ExternalLink({
- href,
- children
-}: {
- href: string
- children: React.ReactNode
-}) {
- return (
- <a href={href} target="_blank" rel="noreferrer">
- {children}
- </a>
- )
-}
diff --git a/spaces/GeorgeOrville/bingo/src/components/tone-selector.tsx b/spaces/GeorgeOrville/bingo/src/components/tone-selector.tsx
deleted file mode 100644
index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/tone-selector.tsx
+++ /dev/null
@@ -1,43 +0,0 @@
-import React from 'react'
-import { BingConversationStyle } from '@/lib/bots/bing/types'
-import { cn } from '@/lib/utils'
-
-type ToneItem = {
- type: BingConversationStyle,
- name: string
-}
-
-const ToneList: ToneItem[] = [
- { name: '有创造力', type: BingConversationStyle.Creative },
- { name: '更平衡', type: BingConversationStyle.Balanced },
- { name: '更精确', type: BingConversationStyle.Precise }
-]
-
-interface ToneSelectorProps {
- type: BingConversationStyle | ''
- onChange?: (type: BingConversationStyle) => void
-}
-
-export function ToneSelector({ type, onChange }: ToneSelectorProps) {
- return (
- <div className={cn('tone-selector')}>
- <div className="legend">
- 选择对话样式
- </div>
- <ul className="options">
- {
- ToneList.map(tone => (
- <li key={tone.type} className={cn('tone-option', { selected: type === tone.type })} onClick={() => onChange?.(tone.type)}>
- <input type="radio" name="tone" checked={type === tone.type} readOnly />
- <label>{tone.name}</label>
- </li>
- ))
- }
- </ul>
- </div>
- )
-}
diff --git a/spaces/GeorgeOrville/bingo/src/components/ui/badge.tsx b/spaces/GeorgeOrville/bingo/src/components/ui/badge.tsx
deleted file mode 100644
index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/ui/badge.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import * as React from 'react'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const badgeVariants = cva(
- 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
- {
- variants: {
- variant: {
- default:
- 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
- secondary:
- 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
- destructive:
- 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
- outline: 'text-foreground'
- }
- },
- defaultVariants: {
- variant: 'default'
- }
- }
-)
-
-export interface BadgeProps
- extends React.HTMLAttributes<HTMLDivElement>,
- VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
- return (
- <div className={cn(badgeVariants({ variant }), className)} {...props} />
- )
-}
-
-export { Badge, badgeVariants }
diff --git a/spaces/GitMylo/bark-voice-cloning/hubert/pre_kmeans_hubert.py b/spaces/GitMylo/bark-voice-cloning/hubert/pre_kmeans_hubert.py
deleted file mode 100644
index b66ba98108e879abb35807e311a2815da88e4f2b..0000000000000000000000000000000000000000
--- a/spaces/GitMylo/bark-voice-cloning/hubert/pre_kmeans_hubert.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from pathlib import Path
-
-import torch
-from torch import nn
-from einops import pack, unpack
-
-import fairseq
-
-from torchaudio.functional import resample
-
-import logging
-logging.root.setLevel(logging.ERROR)
-
-
-def exists(val):
- return val is not None
-
-
-def default(val, d):
- return val if exists(val) else d
-
-
-class CustomHubert(nn.Module):
- """
- checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
- or you can train your own
- """
-
- def __init__(
- self,
- checkpoint_path,
- target_sample_hz=16000,
- seq_len_multiple_of=None,
- output_layer=9
- ):
- super().__init__()
- self.target_sample_hz = target_sample_hz
- self.seq_len_multiple_of = seq_len_multiple_of
- self.output_layer = output_layer
-
- model_path = Path(checkpoint_path)
-
- assert model_path.exists(), f'path {checkpoint_path} does not exist'
-
- checkpoint = torch.load(checkpoint_path)
- load_model_input = {checkpoint_path: checkpoint}
- model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
-
- self.model = model[0]
- self.model.eval()
-
- @property
- def groups(self):
- return 1
-
- @torch.no_grad()
- def forward(
- self,
- wav_input,
- flatten=True,
- input_sample_hz=None
- ):
- device = wav_input.device
-
- if exists(input_sample_hz):
- wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
-
- embed = self.model(
- wav_input,
- features_only=True,
- mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
- output_layer=self.output_layer
- )
-
- embed, packed_shape = pack([embed['x']], '* d')
-
- # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
-
- codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
-
- if flatten:
- return codebook_indices
-
- codebook_indices, = unpack(codebook_indices, packed_shape, '*')
- return codebook_indices
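
A minimal usage sketch for the CustomHubert wrapper above, assuming a local fairseq HuBERT base checkpoint (the path below is a placeholder); audio is resampled to `target_sample_hz` internally and, with `flatten=True`, the output is a `(frames, feature_dim)` tensor:

```python
import torch
from hubert.pre_kmeans_hubert import CustomHubert  # module path as in the deleted file

# placeholder checkpoint path -- download per the docstring's fairseq link or train your own
hubert = CustomHubert(checkpoint_path="models/hubert_base_ls960.pt")

wav = torch.zeros(1, 24000)                 # hypothetical 1-second, 24 kHz mono batch
feats = hubert(wav, input_sample_hz=24000)  # resampled to 16 kHz before the forward pass
print(feats.shape)                          # (frames, 768) for a HuBERT base checkpoint
```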
diff --git "a/spaces/Gmq-x/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py" "b/spaces/Gmq-x/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py"
deleted file mode 100644
index c299e59d3894b7ac2d33df1502746adaef4a47b8..0000000000000000000000000000000000000000
--- "a/spaces/Gmq-x/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\346\266\246\350\211\262.py"
+++ /dev/null
@@ -1,175 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-fast_debug = False
-
-class PaperFileGroup():
- def __init__(self):
- self.file_paths = []
- self.file_contents = []
- self.sp_file_contents = []
- self.sp_file_index = []
- self.sp_file_tag = []
-
- # count_token
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- self.get_token_num = get_token_num
-
- def run_file_split(self, max_token_limit=1900):
- """
- Split long text into smaller segments
- """
- for index, file_content in enumerate(self.file_contents):
- if self.get_token_num(file_content) < max_token_limit:
- self.sp_file_contents.append(file_content)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index])
- else:
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
- for j, segment in enumerate(segments):
- self.sp_file_contents.append(segment)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-
- print('Segmentation: done')
-
-def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
- import time, os, re
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-
- # <-------- Read the LaTeX files and delete all comments in them ---------->
- pfg = PaperFileGroup()
-
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
- # Regex pattern that matches LaTeX comments
- comment_pattern = r'%.*'
- # Find comments with the regex and replace them with an empty string
- clean_tex_content = re.sub(comment_pattern, '', file_content)
- # Keep the text with comments removed
- pfg.file_paths.append(fp)
- pfg.file_contents.append(clean_tex_content)
-
- # <-------- Split LaTeX files that are too long ---------->
- pfg.run_file_split(max_token_limit=1024)
- n_split = len(pfg.sp_file_contents)
-
- # <-------- Extract the abstract ---------->
- # if language == 'en':
- # abs_extract_inputs = f"Please write an abstract for this paper"
-
- # # Single thread: fetch the paper's meta information
- # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
- # inputs=abs_extract_inputs,
- # inputs_show_user=f"正在抽取摘要信息。",
- # llm_kwargs=llm_kwargs,
- # chatbot=chatbot, history=[],
- # sys_prompt="Your job is to collect information from materials。",
- # )
-
- # <-------- Start multi-threaded polishing ---------->
- if language == 'en':
- inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
- elif language == 'zh':
- inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]
-
-
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=inputs_array,
- inputs_show_user_array=inputs_show_user_array,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[""] for _ in range(n_split)],
- sys_prompt_array=sys_prompt_array,
- # max_workers=5, # limit on concurrent tasks: at most 5 run at once, the rest wait in a queue
- scroller_max_len = 80
- )
-
- # <-------- Collect the results and exit ---------->
- create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
- res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
- history = gpt_response_collection
- chatbot.append((f"{fp}完成了吗?", res))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-
-@CatchException
-def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # Basic information: feature description and contributor
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # Try to import dependencies; if one is missing, suggest how to install it
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- history = [] # clear history to avoid overflowing the input
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
-
-
-
-
-
-
-@CatchException
-def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # Basic information: feature description and contributor
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # Try to import dependencies; if one is missing, suggest how to install it
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- history = [] # clear history to avoid overflowing the input
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
\ No newline at end of file
diff --git a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/parameter_attention.tex b/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/parameter_attention.tex
deleted file mode 100644
index 7bc4fe452dbdbfe44ff72f0cdbd37acd5c786ce6..0000000000000000000000000000000000000000
--- a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/parameter_attention.tex
+++ /dev/null
@@ -1,45 +0,0 @@
-\pagebreak
-\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}
-
-In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):
-
-\begin{align*}
- FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
- A(q, K, V) = Softmax(qK^T)V
-\end{align*}
-
-Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.
-
-%the compatablity function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$.
-
-Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.
-
-In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.
-
-In our second experiment, we used $h_p=8$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.
-
-Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.
-
-\begin{table}[h]
-\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
-\label{tab:parameter_attention}
-\begin{center}
-\vspace{-2mm}
-%\scalebox{1.0}{
-\begin{tabular}{c|cccccc|cccc}
-\hline\rule{0pt}{2.0ex}
- & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
-\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
- \multirow{2}{*}{$n_p$} &
- PPL & BLEU & params & training\\
- & & & & & & & (dev) & (dev) & $\times10^6$ & time \\
-\hline\rule{0pt}{2.0ex}
-base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
-\hline\rule{0pt}{2.0ex}
-AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92& 25.5 & 65 & 16 hours\\
-AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
-\hline
-\end{tabular}
-%}
-\end{center}
-\end{table}
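
The equivalence the appendix above describes can be checked directly: a position-wise feed-forward layer is attention over the parameter matrices, with the columns of $W_1$ as keys, the rows of $W_2$ as values, and ReLU in place of softmax. A small PyTorch sketch with hypothetical dimensions (this is only the identity the text states, not the paper's multi-head experiment):

```python
import torch

d_model, d_ff, n_tokens = 512, 2048, 4
x = torch.randn(n_tokens, d_model)
W1 = torch.randn(d_model, d_ff) * 0.02
W2 = torch.randn(d_ff, d_model) * 0.02

# Standard position-wise feed-forward network: FFN(x) = ReLU(x W1) W2
ffn_out = torch.relu(x @ W1) @ W2

# Same computation read as attention over parameters:
# keys = columns of W1 (one d_model-dim key per slot), values = rows of W2,
# compatibility = ReLU(q . k_i) instead of softmax.
keys = W1.t()     # (d_ff, d_model)
values = W2       # (d_ff, d_model)
attn_out = torch.relu(x @ keys.t()) @ values

print(torch.allclose(ffn_out, attn_out))  # True: identical by construction
```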
diff --git a/spaces/Godrose0728/Aisound02/text/cleaners.py b/spaces/Godrose0728/Aisound02/text/cleaners.py
deleted file mode 100644
index eedbeaee8ad73dd4aaf6c12e3f900fc34a1ee630..0000000000000000000000000000000000000000
--- a/spaces/Godrose0728/Aisound02/text/cleaners.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import re
-import pyopenjtalk
-
-pyopenjtalk._lazy_init()
-
-
-def japanese_cleaners(text):
- from text.japanese import japanese_to_romaji_with_accent
- text = japanese_to_romaji_with_accent(text)
- text = re.sub(r'([A-Za-z])$', r'\1.', text)
- return text
-
-
-def japanese_cleaners2(text):
- return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
-
-
-def korean_cleaners(text):
- '''Pipeline for Korean text'''
- from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text = divide_hangul(text)
- text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
- return text
-
-
-def chinese_cleaners(text):
- '''Pipeline for Chinese text'''
- from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
- return text
-
-
-def zh_ja_mixture_cleaners(text):
- from text.mandarin import chinese_to_romaji
- from text.japanese import japanese_to_romaji_with_accent
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_romaji(x.group(1)) + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
- x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def sanskrit_cleaners(text):
- text = text.replace('॥', '।').replace('ॐ', 'ओम्')
- if text[-1] != '।':
- text += ' ।'
- return text
-
-
-def cjks_cleaners(text):
- from text.mandarin import chinese_to_lazy_ipa
- from text.japanese import japanese_to_ipa
- from text.korean import korean_to_lazy_ipa
- from text.sanskrit import devanagari_to_ipa
- from text.english import english_to_lazy_ipa
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_lazy_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_lazy_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[SA\](.*?)\[SA\]',
- lambda x: devanagari_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_lazy_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def cjke_cleaners(text):
- from text.mandarin import chinese_to_lazy_ipa
- from text.japanese import japanese_to_ipa
- from text.korean import korean_to_ipa
- from text.english import english_to_ipa2
- text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
- 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
- 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
- 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def cjke_cleaners2(text):
- from text.mandarin import chinese_to_ipa
- from text.japanese import japanese_to_ipa2
- from text.korean import korean_to_ipa
- from text.english import english_to_ipa2
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def thai_cleaners(text):
- from text.thai import num_to_thai, latin_to_thai
- text = num_to_thai(text)
- text = latin_to_thai(text)
- return text
-
-
-def shanghainese_cleaners(text):
- from text.shanghainese import shanghainese_to_ipa
- text = shanghainese_to_ipa(text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def chinese_dialect_cleaners(text):
- from text.mandarin import chinese_to_ipa2
- from text.japanese import japanese_to_ipa3
- from text.shanghainese import shanghainese_to_ipa
- from text.cantonese import cantonese_to_ipa
- from text.english import english_to_lazy_ipa2
- from text.ngu_dialect import ngu_dialect_to_ipa
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ') + ' ', text)
- text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
- '˧˧˦').replace(
- '6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e') + ' ', text)
- text = re.sub(r'\[GD\](.*?)\[GD\]',
- lambda x: cantonese_to_ipa(x.group(1)) + ' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_lazy_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
- 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ') + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
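
All of the mixed-language cleaners above follow the same pattern: find `[XX]…[XX]`-tagged spans, run the matching language converter, append a space, then trim trailing whitespace and force final punctuation. A tiny self-contained illustration of that tagging mechanism, with a stand-in converter instead of the project's `text.*` modules:

```python
import re

text = '[ZH]你好[ZH] [EN]hello[EN]'

def fake_ipa(s):  # stand-in for chinese_to_ipa / english_to_ipa2, illustration only
    return f'<{s}>'

text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda m: fake_ipa(m.group(1)) + ' ', text)
text = re.sub(r'\[EN\](.*?)\[EN\]', lambda m: fake_ipa(m.group(1)) + ' ', text)
text = re.sub(r'\s+$', '', text)                  # strip trailing whitespace
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)  # ensure final punctuation
print(text)   # '<你好> <hello>.'
```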
diff --git a/spaces/Gradio-Blocks/speech-to-text-app/README.md b/spaces/Gradio-Blocks/speech-to-text-app/README.md
deleted file mode 100644
index 5f9173ca38a46de0fe07f58c554ad4a8c5dfe28b..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/speech-to-text-app/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Speech To Text App
-emoji: 📊
-colorFrom: green
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.9.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
deleted file mode 100644
index e8df265edefee1b7e5892fe373c1c0f80f59bf7b..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
-# model settings
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w18',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(18, 36)),
- stage3=dict(num_channels=(18, 36, 72)),
- stage4=dict(num_channels=(18, 36, 72, 144)))),
- neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/losses/gaussian_focal_loss.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/losses/gaussian_focal_loss.py
deleted file mode 100644
index e45506a38e8e3c187be8288d0b714cc1ee29cf27..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/losses/gaussian_focal_loss.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import mmcv
-import torch.nn as nn
-
-from ..builder import LOSSES
-from .utils import weighted_loss
-
-
-@mmcv.jit(derivate=True, coderize=True)
-@weighted_loss
-def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
- """`Focal Loss `_ for targets in gaussian
- distribution.
-
- Args:
- pred (torch.Tensor): The prediction.
- gaussian_target (torch.Tensor): The learning target of the prediction
- in gaussian distribution.
- alpha (float, optional): A balanced form for Focal Loss.
- Defaults to 2.0.
- gamma (float, optional): The gamma for calculating the modulating
- factor. Defaults to 4.0.
- """
- eps = 1e-12
- pos_weights = gaussian_target.eq(1)
- neg_weights = (1 - gaussian_target).pow(gamma)
- pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
- neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
- return pos_loss + neg_loss
-
-
-@LOSSES.register_module()
-class GaussianFocalLoss(nn.Module):
- """GaussianFocalLoss is a variant of focal loss.
-
- More details can be found in the `paper
- <https://arxiv.org/abs/1808.01244>`_
- Code is modified from `kp_utils.py
- <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py>`_ # noqa: E501
- Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
- not 0/1 binary target.
-
- Args:
- alpha (float): Power of prediction.
- gamma (float): Power of target for negative samples.
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Loss weight of current loss.
- """
-
- def __init__(self,
- alpha=2.0,
- gamma=4.0,
- reduction='mean',
- loss_weight=1.0):
- super(GaussianFocalLoss, self).__init__()
- self.alpha = alpha
- self.gamma = gamma
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred,
- target,
- weight=None,
- avg_factor=None,
- reduction_override=None):
- """Forward function.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction
- in gaussian distribution.
- weight (torch.Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss_reg = self.loss_weight * gaussian_focal_loss(
- pred,
- target,
- weight,
- alpha=self.alpha,
- gamma=self.gamma,
- reduction=reduction,
- avg_factor=avg_factor)
- return loss_reg
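
A short standalone sketch of applying the element-wise formula above to a toy Gaussian heatmap (the loss is re-stated here without the mmcv/mmdet decorators so the snippet runs on its own; the values are illustrative). The target is a soft heatmap that equals 1.0 only at object centres, as the docstring stresses:

```python
import torch

def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0, eps=1e-12):
    # same element-wise formula as above, with reduction/weighting left out
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss

heatmap = torch.zeros(1, 1, 8, 8)
heatmap[0, 0, 3:6, 3:6] = 0.6   # soft gaussian-like neighbourhood
heatmap[0, 0, 4, 4] = 1.0       # object centre: the only "positive" location

pred = torch.sigmoid(torch.randn(1, 1, 8, 8))   # predictions must lie in (0, 1)
print(float(gaussian_focal_loss(pred, heatmap).mean()))
```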
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/decomposition.py b/spaces/HaHaBill/LandShapes-Antarctica/decomposition.py
deleted file mode 100644
index 4819e3324707f15c33fba6f35ab6abdc66dea919..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/decomposition.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright 2020 Erik Härkönen. All rights reserved.
-# This file is licensed to you under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. You may obtain a copy
-# of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
-# OF ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-
-# Patch for broken CTRL+C handler
-# https://github.com/ContinuumIO/anaconda-issues/issues/905
-import os
-os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'
-
-import numpy as np
-import os
-from pathlib import Path
-import re
-import sys
-import datetime
-import argparse
-import torch
-import json
-from types import SimpleNamespace
-import scipy
-from scipy.cluster.vq import kmeans
-from tqdm import trange
-from netdissect.nethook import InstrumentedModel
-from config import Config
-from estimators import get_estimator
-from models import get_instrumented_model
-
-SEED_SAMPLING = 1
-SEED_RANDOM_DIRS = 2
-SEED_LINREG = 3
-SEED_VISUALIZATION = 5
-
-B = 20
-n_clusters = 500
-
-def get_random_dirs(components, dimensions):
- gen = np.random.RandomState(seed=SEED_RANDOM_DIRS)
- dirs = gen.normal(size=(components, dimensions))
- dirs /= np.sqrt(np.sum(dirs**2, axis=1, keepdims=True))
- return dirs.astype(np.float32)
-
-# Compute maximum batch size for given VRAM and network
-def get_max_batch_size(inst, device, layer_name=None):
- inst.remove_edits()
-
- # Reset statistics
- torch.cuda.reset_max_memory_cached(device)
- torch.cuda.reset_max_memory_allocated(device)
- total_mem = torch.cuda.get_device_properties(device).total_memory
-
- B_max = 20
-
- # Measure actual usage
- for i in range(2, B_max, 2):
- z = inst.model.sample_latent(n_samples=i)
- if layer_name:
- inst.model.partial_forward(z, layer_name)
- else:
- inst.model.forward(z)
-
- maxmem = torch.cuda.max_memory_allocated(device)
- del z
-
- if maxmem > 0.5*total_mem:
- print('Batch size {:d}: memory usage {:.0f}MB'.format(i, maxmem / 1e6))
- return i
-
- return B_max
-
-# Solve for directions in latent space that match PCs in activation space
-def linreg_lstsq(comp_np, mean_np, stdev_np, inst, config):
- print('Performing least squares regression', flush=True)
-
- torch.manual_seed(SEED_LINREG)
- np.random.seed(SEED_LINREG)
-
- comp = torch.from_numpy(comp_np).float().to(inst.model.device)
- mean = torch.from_numpy(mean_np).float().to(inst.model.device)
- stdev = torch.from_numpy(stdev_np).float().to(inst.model.device)
-
- n_samp = max(10_000, config.n) // B * B # make divisible
- n_comp = comp.shape[0]
- latent_dims = inst.model.get_latent_dims()
-
- # We're looking for M s.t. M*P*G'(Z) = Z => M*A = Z
- # Z = batch of latent vectors (n_samples x latent_dims)
- # G'(Z) = batch of activations at intermediate layer
- # A = P*G'(Z) = projected activations (n_samples x pca_coords)
- # M = linear mapping (pca_coords x latent_dims)
-
- # Minimization min_M ||MA - Z||_l2 rewritten as min_M.T ||A.T*M.T - Z.T||_l2
- # to match format expected by pytorch.lstsq
-
- # TODO: regression on pixel-space outputs? (using nonlinear optimizer)
- # min_M lpips(G_full(MA), G_full(Z))
-
- # Tensors to fill with data
- # Dimensions other way around, so these are actually the transposes
- A = np.zeros((n_samp, n_comp), dtype=np.float32)
- Z = np.zeros((n_samp, latent_dims), dtype=np.float32)
-
- # Project tensor X onto PCs, return coordinates
- def project(X, comp):
- N = X.shape[0]
- K = comp.shape[0]
- coords = torch.bmm(comp.expand([N]+[-1]*comp.ndim), X.view(N, -1, 1))
- return coords.reshape(N, K)
-
- for i in trange(n_samp // B, desc='Collecting samples', ascii=True):
- z = inst.model.sample_latent(B)
- inst.model.partial_forward(z, config.layer)
- act = inst.retained_features()[config.layer].reshape(B, -1)
-
- # Project onto basis
- act = act - mean
- coords = project(act, comp)
- coords_scaled = coords / stdev
-
- A[i*B:(i+1)*B] = coords_scaled.detach().cpu().numpy()
- Z[i*B:(i+1)*B] = z.detach().cpu().numpy().reshape(B, -1)
-
- # Solve least squares fit
-
- # gelsd = divide-and-conquer SVD; good default
- # gelsy = complete orthogonal factorization; sometimes faster
- # gelss = SVD; slow but less memory hungry
- M_t = scipy.linalg.lstsq(A, Z, lapack_driver='gelsd')[0] # torch.lstsq(Z, A)[0][:n_comp, :]
-
- # Solution given by rows of M_t
- Z_comp = M_t[:n_comp, :]
- Z_mean = np.mean(Z, axis=0, keepdims=True)
-
- return Z_comp, Z_mean
-
-def regression(comp, mean, stdev, inst, config):
- # Sanity check: verify orthonormality
- M = np.dot(comp, comp.T)
- if not np.allclose(M, np.identity(M.shape[0])):
- det = np.linalg.det(M)
- print(f'WARNING: Computed basis is not orthonormal (determinant={det})')
-
- return linreg_lstsq(comp, mean, stdev, inst, config)
-
-def compute(config, dump_name, instrumented_model):
- global B
-
- timestamp = lambda : datetime.datetime.now().strftime("%d.%m %H:%M")
- print(f'[{timestamp()}] Computing', dump_name.name)
-
- # Ensure reproducibility
- torch.manual_seed(0) # also sets cuda seeds
- np.random.seed(0)
-
- # Speed up backend
- torch.backends.cudnn.benchmark = True
-
- has_gpu = torch.cuda.is_available()
- device = torch.device('cuda' if has_gpu else 'cpu')
- layer_key = config.layer
-
- if instrumented_model is None:
- inst = get_instrumented_model(config.model, config.output_class, layer_key, device)
- model = inst.model
- else:
- print('Reusing InstrumentedModel instance')
- inst = instrumented_model
- model = inst.model
- inst.remove_edits()
- model.set_output_class(config.output_class)
-
- # Regress back to w space
- if config.use_w:
- print('Using W latent space')
- model.use_w()
-
- inst.retain_layer(layer_key)
- model.partial_forward(model.sample_latent(1), layer_key)
- sample_shape = inst.retained_features()[layer_key].shape
- sample_dims = np.prod(sample_shape)
- print('Feature shape:', sample_shape)
-
- input_shape = inst.model.get_latent_shape()
- input_dims = inst.model.get_latent_dims()
-
- config.components = min(config.components, sample_dims)
- transformer = get_estimator(config.estimator, config.components, config.sparsity)
-
- X = None
- X_global_mean = None
-
- # Figure out batch size if not provided
- B = config.batch_size or get_max_batch_size(inst, device, layer_key)
-
- # Divisible by B (ignored in output name)
- N = config.n // B * B
-
- # Compute maximum batch size based on RAM + pagefile budget
- target_bytes = 20 * 1_000_000_000 # GB
- feat_size_bytes = sample_dims * np.dtype('float64').itemsize
- N_limit_RAM = np.floor_divide(target_bytes, feat_size_bytes)
- if not transformer.batch_support and N > N_limit_RAM:
- print('WARNING: estimator does not support batching, ' \
- 'given config will use {:.1f} GB memory.'.format(feat_size_bytes / 1_000_000_000 * N))
-
- # 32-bit LAPACK gets very unhappy about huge matrices (in linalg.svd)
- if config.estimator == 'ica':
- lapack_max_N = np.floor_divide(np.iinfo(np.int32).max // 4, sample_dims) # 4x extra buffer
- if N > lapack_max_N:
- raise RuntimeError(f'Matrices too large for ICA, please use N <= {lapack_max_N}')
-
- print('B={}, N={}, dims={}, N/dims={:.1f}'.format(B, N, sample_dims, N/sample_dims), flush=True)
-
- # Must not depend on chosen batch size (reproducibility)
- NB = max(B, max(2_000, 3*config.components)) # ipca: as large as possible!
-
- samples = None
- if not transformer.batch_support:
- samples = np.zeros((N + NB, sample_dims), dtype=np.float32)
-
- torch.manual_seed(config.seed or SEED_SAMPLING)
- np.random.seed(config.seed or SEED_SAMPLING)
-
- # Use exactly the same latents regardless of batch size
- # Store in main memory, since N might be huge (1M+)
- # Run in batches, since sample_latent() might perform Z -> W mapping
- n_lat = ((N + NB - 1) // B + 1) * B
- latents = np.zeros((n_lat, *input_shape[1:]), dtype=np.float32)
- with torch.no_grad():
- for i in trange(n_lat // B, desc='Sampling latents'):
- latents[i*B:(i+1)*B] = model.sample_latent(n_samples=B).cpu().numpy()
-
- # Decomposition on non-Gaussian latent space
- samples_are_latents = layer_key in ['g_mapping', 'style'] and inst.model.latent_space_name() == 'W'
-
- canceled = False
- try:
- X = np.ones((NB, sample_dims), dtype=np.float32)
- action = 'Fitting' if transformer.batch_support else 'Collecting'
- for gi in trange(0, N, NB, desc=f'{action} batches (NB={NB})', ascii=True):
- for mb in range(0, NB, B):
- z = torch.from_numpy(latents[gi+mb:gi+mb+B]).to(device)
-
- if samples_are_latents:
- # Decomposition on latents directly (e.g. StyleGAN W)
- batch = z.reshape((B, -1))
- else:
- # Decomposition on intermediate layer
- with torch.no_grad():
- model.partial_forward(z, layer_key)
-
- # Permuted to place PCA dimensions last
- batch = inst.retained_features()[layer_key].reshape((B, -1))
-
- space_left = min(B, NB - mb)
- X[mb:mb+space_left] = batch.cpu().numpy()[:space_left]
-
- if transformer.batch_support:
- if not transformer.fit_partial(X.reshape(-1, sample_dims)):
- break
- else:
- samples[gi:gi+NB, :] = X.copy()
- except KeyboardInterrupt:
- if not transformer.batch_support:
- sys.exit(1) # no progress yet
-
- dump_name = dump_name.parent / dump_name.name.replace(f'n{N}', f'n{gi}')
- print(f'Saving current state to "{dump_name.name}" before exiting')
- canceled = True
-
- if not transformer.batch_support:
- X = samples # Use all samples
- X_global_mean = X.mean(axis=0, keepdims=True, dtype=np.float32) # TODO: activations surely multi-modal...!
- X -= X_global_mean
-
- print(f'[{timestamp()}] Fitting whole batch')
- t_start_fit = datetime.datetime.now()
-
- transformer.fit(X)
-
- print(f'[{timestamp()}] Done in {datetime.datetime.now() - t_start_fit}')
- assert np.all(transformer.transformer.mean_ < 1e-3), 'Mean of normalized data should be zero'
- else:
- X_global_mean = transformer.transformer.mean_.reshape((1, sample_dims))
- X = X.reshape(-1, sample_dims)
- X -= X_global_mean
-
- X_comp, X_stdev, X_var_ratio = transformer.get_components()
-
- assert X_comp.shape[1] == sample_dims \
- and X_comp.shape[0] == config.components \
- and X_global_mean.shape[1] == sample_dims \
- and X_stdev.shape[0] == config.components, 'Invalid shape'
-
- # 'Activations' are really latents in a secondary latent space
- if samples_are_latents:
- Z_comp = X_comp
- Z_global_mean = X_global_mean
- else:
- Z_comp, Z_global_mean = regression(X_comp, X_global_mean, X_stdev, inst, config)
-
- # Normalize
- Z_comp /= np.linalg.norm(Z_comp, axis=-1, keepdims=True)
-
- # Random projections
- # We expect these to explain much less of the variance
- random_dirs = get_random_dirs(config.components, np.prod(sample_shape))
- n_rand_samples = min(5000, X.shape[0])
- X_view = X[:n_rand_samples, :].T
- assert np.shares_memory(X_view, X), "Error: slice produced copy"
- X_stdev_random = np.dot(random_dirs, X_view).std(axis=1)
-
- # Inflate back to proper shapes (for easier broadcasting)
- X_comp = X_comp.reshape(-1, *sample_shape)
- X_global_mean = X_global_mean.reshape(sample_shape)
- Z_comp = Z_comp.reshape(-1, *input_shape)
- Z_global_mean = Z_global_mean.reshape(input_shape)
-
- # Compute stdev in latent space if non-Gaussian
- lat_stdev = np.ones_like(X_stdev)
- if config.use_w:
- samples = model.sample_latent(5000).reshape(5000, input_dims).detach().cpu().numpy()
- coords = np.dot(Z_comp.reshape(-1, input_dims), samples.T)
- lat_stdev = coords.std(axis=1)
-
- os.makedirs(dump_name.parent, exist_ok=True)
- np.savez_compressed(dump_name, **{
- 'act_comp': X_comp.astype(np.float32),
- 'act_mean': X_global_mean.astype(np.float32),
- 'act_stdev': X_stdev.astype(np.float32),
- 'lat_comp': Z_comp.astype(np.float32),
- 'lat_mean': Z_global_mean.astype(np.float32),
- 'lat_stdev': lat_stdev.astype(np.float32),
- 'var_ratio': X_var_ratio.astype(np.float32),
- 'random_stdevs': X_stdev_random.astype(np.float32),
- })
-
- if canceled:
- sys.exit(1)
-
- # Don't shutdown if passed as param
- if instrumented_model is None:
- inst.close()
- del inst
- del model
-
- del X
- del X_comp
- del random_dirs
- del batch
- del samples
- del latents
- torch.cuda.empty_cache()
-
-# Return cached results or compute if needed
-# Pass existing InstrumentedModel instance to reuse it
-def get_or_compute(config, model=None, submit_config=None, force_recompute=False):
- if submit_config is None:
- wrkdir = str(Path(__file__).parent.resolve())
- submit_config = SimpleNamespace(run_dir_root = wrkdir, run_dir = wrkdir)
-
- # Called directly by run.py
- return _compute(submit_config, config, model, force_recompute)
-
-def _compute(submit_config, config, model=None, force_recompute=False):
- basedir = Path(submit_config.run_dir)
- outdir = basedir / 'out'
-
- if config.n is None:
- raise RuntimeError('Must specify number of samples with -n=XXX')
-
- if model and not isinstance(model, InstrumentedModel):
- raise RuntimeError('Passed model has to be wrapped in "InstrumentedModel"')
-
- if config.use_w and not 'StyleGAN' in config.model:
- raise RuntimeError(f'Cannot change latent space of non-StyleGAN model {config.model}')
-
- transformer = get_estimator(config.estimator, config.components, config.sparsity)
- dump_name = "{}-{}_{}_{}_n{}{}{}.npz".format(
- config.model.lower(),
- config.output_class.replace(' ', '_'),
- config.layer.lower(),
- transformer.get_param_str(),
- config.n,
- '_w' if config.use_w else '',
- f'_seed{config.seed}' if config.seed else ''
- )
-
- dump_path = basedir / 'cache' / 'components' / dump_name
-
- if not dump_path.is_file() or force_recompute:
- print('Not cached')
- t_start = datetime.datetime.now()
- compute(config, dump_path, model)
- print('Total time:', datetime.datetime.now() - t_start)
-
- return dump_path
\ No newline at end of file
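
The regression step in the file above (the `M*A = Z` comments in `linreg_lstsq`) reduces to a single least-squares solve: given PCA coordinates `A` of the activations and the latents `Z` that produced them, find the map whose rows are latent-space directions for each component. A toy NumPy/SciPy sketch of that solve on synthetic data (same `gelsd` driver, made-up sizes):

```python
import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
n_samples, n_comp, latent_dims = 1000, 8, 32

A = rng.standard_normal((n_samples, n_comp))          # projected, stdev-normalised activations
M_true = rng.standard_normal((n_comp, latent_dims))   # unknown mapping to recover
Z = A @ M_true + 0.01 * rng.standard_normal((n_samples, latent_dims))  # corresponding latents

# min_M ||A M - Z||_2, matching the scipy.linalg.lstsq(..., lapack_driver='gelsd') call above
M_t = scipy.linalg.lstsq(A, Z, lapack_driver='gelsd')[0]
Z_comp = M_t[:n_comp, :]                               # one latent direction per component

print(np.abs(Z_comp - M_true).max())                   # small: the mapping is recovered up to noise
```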
diff --git a/spaces/Hallucinate/demo/utils.py b/spaces/Hallucinate/demo/utils.py
deleted file mode 100644
index fcfbb1cc4c34909c97e80d721fa6715434609ccb..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/utils.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import base64
-import math
-import re
-from io import BytesIO
-
-import matplotlib.cm
-import numpy as np
-import torch
-import torch.nn
-from PIL import Image
-
-
-class RunningAverage:
- def __init__(self):
- self.avg = 0
- self.count = 0
-
- def append(self, value):
- self.avg = (value + self.count * self.avg) / (self.count + 1)
- self.count += 1
-
- def get_value(self):
- return self.avg
-
-
-def denormalize(x, device='cpu'):
- mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
- std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
- return x * std + mean
-
-
-class RunningAverageDict:
- def __init__(self):
- self._dict = None
-
- def update(self, new_dict):
- if self._dict is None:
- self._dict = dict()
- for key, value in new_dict.items():
- self._dict[key] = RunningAverage()
-
- for key, value in new_dict.items():
- self._dict[key].append(value)
-
- def get_value(self):
- return {key: value.get_value() for key, value in self._dict.items()}
-
-
-def colorize(value, vmin=10, vmax=1000, cmap='magma_r'):
- value = value.cpu().numpy()[0, :, :]
- invalid_mask = value == -1
-
- # normalize
- vmin = value.min() if vmin is None else vmin
- vmax = value.max() if vmax is None else vmax
- if vmin != vmax:
- value = (value - vmin) / (vmax - vmin) # vmin..vmax
- else:
- # Avoid 0-division
- value = value * 0.
- # squeeze last dim if it exists
- # value = value.squeeze(axis=0)
- cmapper = matplotlib.cm.get_cmap(cmap)
- value = cmapper(value, bytes=True) # (nxmx4)
- value[invalid_mask] = 255
- img = value[:, :, :3]
-
- # return img.transpose((2, 0, 1))
- return img
-
-
-def count_parameters(model):
- return sum(p.numel() for p in model.parameters() if p.requires_grad)
-
-
-def compute_errors(gt, pred):
- thresh = np.maximum((gt / pred), (pred / gt))
- a1 = (thresh < 1.25).mean()
- a2 = (thresh < 1.25 ** 2).mean()
- a3 = (thresh < 1.25 ** 3).mean()
-
- abs_rel = np.mean(np.abs(gt - pred) / gt)
- sq_rel = np.mean(((gt - pred) ** 2) / gt)
-
- rmse = (gt - pred) ** 2
- rmse = np.sqrt(rmse.mean())
-
- rmse_log = (np.log(gt) - np.log(pred)) ** 2
- rmse_log = np.sqrt(rmse_log.mean())
-
- err = np.log(pred) - np.log(gt)
- silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
-
- log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
- return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
- silog=silog, sq_rel=sq_rel)
-
-
-##################################### Demo Utilities ############################################
-def b64_to_pil(b64string):
- image_data = re.sub('^data:image/.+;base64,', '', b64string)
- # image = Image.open(cStringIO.StringIO(image_data))
- return Image.open(BytesIO(base64.b64decode(image_data)))
-
-
-# Compute edge magnitudes
-from scipy import ndimage
-
-
-def edges(d):
- dx = ndimage.sobel(d, 0) # horizontal derivative
- dy = ndimage.sobel(d, 1) # vertical derivative
- return np.abs(dx) + np.abs(dy)
-
-
-class PointCloudHelper():
- def __init__(self, width=640, height=480):
- self.xx, self.yy = self.worldCoords(width, height)
-
- def worldCoords(self, width=640, height=480):
- hfov_degrees, vfov_degrees = 57, 43
- hFov = math.radians(hfov_degrees)
- vFov = math.radians(vfov_degrees)
- cx, cy = width / 2, height / 2
- fx = width / (2 * math.tan(hFov / 2))
- fy = height / (2 * math.tan(vFov / 2))
- xx, yy = np.tile(range(width), height), np.repeat(range(height), width)
- xx = (xx - cx) / fx
- yy = (yy - cy) / fy
- return xx, yy
-
- def depth_to_points(self, depth):
- depth[edges(depth) > 0.3] = np.nan # Hide depth edges
- length = depth.shape[0] * depth.shape[1]
- # depth[edges(depth) > 0.3] = 1e6 # Hide depth edges
- z = depth.reshape(length)
-
- return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3))
-
-#####################################################################################################
\ No newline at end of file
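
The depth metrics in `compute_errors` above are easy to sanity-check by hand: for a uniform 10% over-prediction the threshold accuracy a1 is 1.0, abs_rel is exactly 0.1, and silog is ~0 because it ignores a constant log offset. A toy check with illustrative values only:

```python
import numpy as np

gt = np.array([1.0, 2.0, 4.0, 8.0])
pred = gt * 1.1                                  # uniform 10% over-prediction

thresh = np.maximum(gt / pred, pred / gt)
a1 = (thresh < 1.25).mean()                      # 1.0: every ratio is 1.1
abs_rel = np.mean(np.abs(gt - pred) / gt)        # 0.1 exactly

err = np.log(pred) - np.log(gt)                  # constant log-error
silog = np.sqrt(max(np.mean(err ** 2) - np.mean(err) ** 2, 0.0)) * 100  # ~0 (scale-invariant)

print(a1, round(abs_rel, 3), round(silog, 4))
```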
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
deleted file mode 100644
index 17387b2f85c0ee76db1a003091331b46de8d8def..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import hashlib
-import logging
-import math
-
-import numpy as np
-from fairseq.data import SampledMultiDataset
-
-from .sampled_multi_dataset import CollateFormat, default_virtual_size_func
-
-
-logger = logging.getLogger(__name__)
-
-
-class SampledMultiEpochDataset(SampledMultiDataset):
- """Samples from multiple sub-datasets according to sampling ratios
- using virtual epoch sizes to speed up dataloading.
- Args:
- datasets (
- List[~torch.utils.data.Dataset]
- or OrderedDict[str, ~torch.utils.data.Dataset]
- ): datasets
- sampling_ratios (List[float]): list of probability of each dataset to be sampled
- (default: None, which corresponds to concating all dataset together).
- seed (int): RNG seed to use (default: 2).
- epoch (int): starting epoch number (default: 1).
- eval_key (str, optional): a key used at evaluation time that causes
- this instance to pass-through batches from *datasets[eval_key]*.
- collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
- CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
- the collater to output batches of data mixed from all sub-datasets,
- and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
- of sub-datasets.
- Note that not all sub-datasets will present in a single batch in both formats.
- virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
- split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
- virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by
- this virtual epoch size one by one to speed up data loading, e.g. indexing and filtering
- can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded.
- shared_collater (bool): whether or not all sub-datasets have the same collater.
- shard_epoch (int): the real epoch number for shard selection.
- shuffle (bool): whether or not to shuffle data (default: True).
- """
-
- def __init__(
- self,
- datasets,
- sampling_ratios=None,
- seed=2,
- epoch=1,
- eval_key=None,
- collate_format=CollateFormat.single,
- virtual_size=default_virtual_size_func,
- split="",
- virtual_epoch_size=None,
- shared_collater=False,
- shard_epoch=1,
- shuffle=True,
- ):
- self.virtual_epoch_size = virtual_epoch_size
- self._current_epoch_start_index = None
- self._random_global_indices = None
- self.shard_epoch = shard_epoch if shard_epoch is not None else 1
- self.load_next_shard = None
- self._epoch_sizes = None
- super().__init__(
- datasets=datasets,
- sampling_ratios=sampling_ratios,
- seed=seed,
- epoch=epoch,
- eval_key=eval_key,
- collate_format=collate_format,
- virtual_size=virtual_size,
- split=split,
- shared_collater=shared_collater,
- shuffle=shuffle,
- )
-
- def _setup(self, epoch):
- self.virtual_epoch_size = (
- self.virtual_epoch_size
- if self.virtual_epoch_size is not None
- else self.virtual_size
- )
- if self.virtual_epoch_size > self.virtual_size:
- logger.warning(
- f"virtual epoch size {self.virtual_epoch_size} "
- f"is greater than virtual dataset size {self.virtual_size}"
- )
- self.virtual_epoch_size = self.virtual_size
- self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size)
- self._current_epoch_start_index = self._get_epoch_start_index(epoch)
- logger.info(
- f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}"
- )
-
- def _map_epoch_index_to_global(self, index):
- index = self._current_epoch_start_index + index
- # add randomness
- return self._random_global_indices[index]
-
- @property
- def sizes(self):
- if self._epoch_sizes is not None:
- return self._epoch_sizes
- _sizes = super().sizes
- indices = self._random_global_indices[
- self._current_epoch_start_index : self._current_epoch_start_index
- + len(self)
- ]
- self._epoch_sizes = _sizes[indices]
- # del super()._sizes to save memory
- del self._sizes
- self._sizes = None
- return self._epoch_sizes
-
- def _get_dataset_and_index(self, index):
- i = self._map_epoch_index_to_global(index)
- return super()._get_dataset_and_index(i)
-
- def __len__(self):
- return (
- self.virtual_epoch_size
- if self._current_epoch_start_index + self.virtual_epoch_size
- < self.virtual_size
- else self.virtual_size - self._current_epoch_start_index
- )
-
- def set_epoch(self, epoch):
- if self._current_epoch_start_index is None:
- # initializing epoch indices of a virtual dataset
- self._setup(epoch)
- self._next_virtual_epoch(epoch)
- else:
- # working on already initialized epoch indices
- if epoch == self._cur_epoch:
- # re-enter so return
- return
- self._next_virtual_epoch(epoch)
-
- def _get_epoch_start_index(self, epoch):
- assert epoch >= 1 # fairseq is using 1-based epoch everywhere
- return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size
-
- def _next_global_indices(self, epoch):
- rng = np.random.RandomState(
- [
- int(
- hashlib.sha1(
- str(self.__class__.__name__).encode("utf-8")
- ).hexdigest(),
- 16,
- )
- % (2 ** 32),
- self.seed % (2 ** 32), # global seed
- epoch, # epoch index,
- ]
- )
- del self._random_global_indices
- self._random_global_indices = rng.choice(
- self.virtual_size, self.virtual_size, replace=False
- )
- if self.load_next_shard is None:
- self.load_next_shard = False
- else:
- # increase shard epoch for next loading
- self.shard_epoch += 1
- self.load_next_shard = True
- logger.info(
- "to load next epoch/shard in next load_dataset: "
- f"epoch={epoch}/shard_epoch={self.shard_epoch}"
- )
-
- def _next_virtual_epoch(self, epoch):
- index = self._get_epoch_start_index(epoch)
- if index == 0 or self._random_global_indices is None:
- # need to start from the beginning,
- # so call super().set_epoch(epoch) to establish the global virtual indices
- logger.info(
- "establishing a new set of global virtual indices for "
- f"epoch={epoch}/shard_epoch={self.shard_epoch}"
- )
- super().set_epoch(epoch)
- self._next_global_indices(epoch)
- else:
- self._cur_epoch = epoch
-
- # reset cache sizes and ordered_indices for the epoch after moving to a new epoch
- self._clean_if_not_none(
- [
- self._epoch_sizes,
- ]
- )
- self._epoch_sizes = None
- self._current_epoch_start_index = index
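
The virtual-epoch bookkeeping in the class above boils down to: draw one permutation of the whole virtual dataset, then hand out consecutive `virtual_epoch_size`-sized windows of it, wrapping (and, in the real class, reshuffling) when the permutation is exhausted. A toy sketch of just the index arithmetic, with made-up sizes and no fairseq imports:

```python
import numpy as np

virtual_size, virtual_epoch_size = 10, 4
num_virtual_epochs = -(-virtual_size // virtual_epoch_size)   # ceil(10 / 4) = 3

rng = np.random.RandomState(0)
random_global_indices = rng.choice(virtual_size, virtual_size, replace=False)

def epoch_start_index(epoch):                    # fairseq epochs are 1-based
    return ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size

def epoch_indices(epoch):
    start = epoch_start_index(epoch)
    length = min(virtual_epoch_size, virtual_size - start)     # the last window may be short
    return random_global_indices[start:start + length]

for epoch in range(1, 5):                        # epoch 4 wraps back to the first window
    print(epoch, epoch_indices(epoch))
```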
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/masked_lm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/masked_lm.py
deleted file mode 100644
index 0c08132fb742de3d3d1beea0b8fce979ff408ebb..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/masked_lm.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-import logging
-import os
-
-from omegaconf import MISSING, II, OmegaConf
-
-import numpy as np
-from fairseq import utils
-from fairseq.data import (
- Dictionary,
- IdDataset,
- MaskTokensDataset,
- NestedDictionaryDataset,
- NumelDataset,
- NumSamplesDataset,
- PrependTokenDataset,
- RightPadDataset,
- SortDataset,
- TokenBlockDataset,
- data_utils,
-)
-from fairseq.data.encoders.utils import get_whole_word_mask
-from fairseq.data.shorten_dataset import maybe_shorten_dataset
-from fairseq.dataclass import FairseqDataclass
-from fairseq.tasks import FairseqTask, register_task
-
-from .language_modeling import SAMPLE_BREAK_MODE_CHOICES, SHORTEN_METHOD_CHOICES
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class MaskedLMConfig(FairseqDataclass):
- data: str = field(
- default=MISSING,
- metadata={
- "help": "colon separated path to data directories list, \
- will be iterated upon during epochs in round-robin manner"
- },
- )
- sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
- default="none",
- metadata={
- "help": 'If omitted or "none", fills each sample with tokens-per-sample '
- 'tokens. If set to "complete", splits samples only at the end '
- "of sentence, but may include multiple sentences per sample. "
- '"complete_doc" is similar but respects doc boundaries. '
- 'If set to "eos", includes only one sentence per sample.'
- },
- )
- tokens_per_sample: int = field(
- default=1024,
- metadata={"help": "max number of tokens per sample for LM dataset"},
- )
- mask_prob: float = field(
- default=0.15,
- metadata={"help": "probability of replacing a token with mask"},
- )
- leave_unmasked_prob: float = field(
- default=0.1,
- metadata={"help": "probability that a masked token is unmasked"},
- )
- random_token_prob: float = field(
- default=0.1,
- metadata={"help": "probability of replacing a token with a random token"},
- )
- freq_weighted_replacement: bool = field(
- default=False,
- metadata={"help": "sample random replacement words based on word frequencies"},
- )
- mask_whole_words: bool = field(
- default=False,
- metadata={"help": "mask whole words; you may also want to set --bpe"},
- )
- mask_multiple_length: int = field(
- default=1,
- metadata={"help": "repeat the mask indices multiple times"},
- )
- mask_stdev: float = field(
- default=0.0,
- metadata={"help": "stdev of the mask length"},
- )
- shorten_method: SHORTEN_METHOD_CHOICES = field(
- default="none",
- metadata={
- "help": "if not none, shorten sequences that exceed --tokens-per-sample"
- },
- )
- shorten_data_split_list: str = field(
- default="",
- metadata={
- "help": "comma-separated list of dataset splits to apply shortening to, "
- 'e.g., "train,valid" (default: all dataset splits)'
- },
- )
- seed: int = II("common.seed")
-
-
-@register_task("masked_lm", dataclass=MaskedLMConfig)
-class MaskedLMTask(FairseqTask):
-
- cfg: MaskedLMConfig
-
- """Task for training masked language models (e.g., BERT, RoBERTa)."""
-
- def __init__(self, cfg: MaskedLMConfig, dictionary):
- super().__init__(cfg)
- self.dictionary = dictionary
-
- # add mask token
- self.mask_idx = dictionary.add_symbol("<mask>")
-
- @classmethod
- def setup_task(cls, cfg: MaskedLMConfig, **kwargs):
- paths = utils.split_paths(cfg.data)
- assert len(paths) > 0
- dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
- logger.info("dictionary: {} types".format(len(dictionary)))
- return cls(cfg, dictionary)
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- paths = utils.split_paths(self.cfg.data)
- assert len(paths) > 0
- data_path = paths[(epoch - 1) % len(paths)]
- split_path = os.path.join(data_path, split)
-
- dataset = data_utils.load_indexed_dataset(
- split_path,
- self.source_dictionary,
- combine=combine,
- )
- if dataset is None:
- raise FileNotFoundError(
- "Dataset not found: {} ({})".format(split, split_path)
- )
-
- dataset = maybe_shorten_dataset(
- dataset,
- split,
- self.cfg.shorten_data_split_list,
- self.cfg.shorten_method,
- self.cfg.tokens_per_sample,
- self.cfg.seed,
- )
-
- # create continuous blocks of tokens
- dataset = TokenBlockDataset(
- dataset,
- dataset.sizes,
- self.cfg.tokens_per_sample - 1, # one less for <s>
- pad=self.source_dictionary.pad(),
- eos=self.source_dictionary.eos(),
- break_mode=self.cfg.sample_break_mode,
- )
- logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
-
- # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
- dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
-
- # create masked input and targets
- mask_whole_words = (
- get_whole_word_mask(self.args, self.source_dictionary)
- if self.cfg.mask_whole_words
- else None
- )
-
- src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
- dataset,
- self.source_dictionary,
- pad_idx=self.source_dictionary.pad(),
- mask_idx=self.mask_idx,
- seed=self.cfg.seed,
- mask_prob=self.cfg.mask_prob,
- leave_unmasked_prob=self.cfg.leave_unmasked_prob,
- random_token_prob=self.cfg.random_token_prob,
- freq_weighted_replacement=self.cfg.freq_weighted_replacement,
- mask_whole_words=mask_whole_words,
- mask_multiple_length=self.cfg.mask_multiple_length,
- mask_stdev=self.cfg.mask_stdev,
- )
-
- with data_utils.numpy_seed(self.cfg.seed):
- shuffle = np.random.permutation(len(src_dataset))
-
- self.datasets[split] = SortDataset(
- NestedDictionaryDataset(
- {
- "id": IdDataset(),
- "net_input": {
- "src_tokens": RightPadDataset(
- src_dataset,
- pad_idx=self.source_dictionary.pad(),
- ),
- "src_lengths": NumelDataset(src_dataset, reduce=False),
- },
- "target": RightPadDataset(
- tgt_dataset,
- pad_idx=self.source_dictionary.pad(),
- ),
- "nsentences": NumSamplesDataset(),
- "ntokens": NumelDataset(src_dataset, reduce=True),
- },
- sizes=[src_dataset.sizes],
- ),
- sort_order=[
- shuffle,
- src_dataset.sizes,
- ],
- )
-
- def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
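- # Wrap raw tokens for inference: split on eos, right-pad, prepend bos, and expose
- # the id/net_input fields the model expects; optionally sort examples by length.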
- src_dataset = RightPadDataset(
- TokenBlockDataset(
- src_tokens,
- src_lengths,
- self.cfg.tokens_per_sample - 1, # one less for <s>
- pad=self.source_dictionary.pad(),
- eos=self.source_dictionary.eos(),
- break_mode="eos",
- ),
- pad_idx=self.source_dictionary.pad(),
- )
- src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
- src_dataset = NestedDictionaryDataset(
- {
- "id": IdDataset(),
- "net_input": {
- "src_tokens": src_dataset,
- "src_lengths": NumelDataset(src_dataset, reduce=False),
- },
- },
- sizes=src_lengths,
- )
- if sort:
- src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
- return src_dataset
-
- @property
- def source_dictionary(self):
- return self.dictionary
-
- @property
- def target_dictionary(self):
- return self.dictionary
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/unify_transformer.py b/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/unify_transformer.py
deleted file mode 100644
index 18f767de523165d407e3ceb2aa506f33cc6bbf79..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/unify_transformer.py
+++ /dev/null
@@ -1,1512 +0,0 @@
-# Copyright 2022 The OFA-Sys Team.
-# All rights reserved.
-# This source code is licensed under the Apache 2.0 license
-# found in the LICENSE file in the root directory.
-
-import math
-import random
-from typing import Any, Dict, List, Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.distributed import fsdp_wrap
-from fairseq.models import (
- FairseqEncoder,
- FairseqEncoderDecoderModel,
- FairseqIncrementalDecoder,
- register_model,
- register_model_architecture,
-)
-from fairseq.modules import (
- AdaptiveSoftmax,
- BaseLayer,
- FairseqDropout,
- LayerDropModuleList,
- LayerNorm,
- SinusoidalPositionalEmbedding,
- GradMultiply
-)
-from fairseq.modules.checkpoint_activations import checkpoint_wrapper
-from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
-from torch import Tensor
-
-from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
-from .resnet import ResNet
-
-
-DEFAULT_MAX_SOURCE_POSITIONS = 1024
-DEFAULT_MAX_TARGET_POSITIONS = 1024
-
-
-DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
-
-
-def BatchNorm2d(out_chan, momentum=0.1, eps=1e-3):
- return nn.SyncBatchNorm.convert_sync_batchnorm(
- nn.BatchNorm2d(out_chan, momentum=momentum, eps=eps)
- )
-
-
-def make_token_bucket_position(bucket_size, max_position=DEFAULT_MAX_SOURCE_POSITIONS):
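- # Map relative token distances to bucket indices: small distances keep their exact
- # value, larger distances share logarithmically spaced buckets, and the result is
- # shifted by bucket_size - 1 so all indices are non-negative.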
- context_pos = torch.arange(max_position, dtype=torch.long)[:, None]
- memory_pos = torch.arange(max_position, dtype=torch.long)[None, :]
- relative_pos = context_pos - memory_pos
- sign = torch.sign(relative_pos)
- mid = bucket_size // 2
- abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, torch.abs(relative_pos))
- log_pos = torch.ceil(torch.log(abs_pos/mid)/math.log((max_position-1)/mid) * (mid-1)) + mid
- log_pos = log_pos.int()
- bucket_pos = torch.where(abs_pos.le(mid), relative_pos, log_pos*sign).long()
- return bucket_pos + bucket_size - 1
-
-
-def make_image_bucket_position(bucket_size, num_relative_distance):
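- # Build a (Wh*Ww + 1) x (Wh*Ww + 1) table of relative-position bucket indices for a
- # bucket_size x bucket_size patch grid; the last three indices are reserved for
- # pairs involving the leading special token.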
- coords_h = torch.arange(bucket_size)
- coords_w = torch.arange(bucket_size)
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += bucket_size - 1 # shift to start from 0
- relative_coords[:, :, 1] += bucket_size - 1
- relative_coords[:, :, 0] *= 2 * bucket_size - 1
- relative_position_index = torch.zeros(size=(bucket_size * bucket_size + 1,) * 2, dtype=relative_coords.dtype)
- relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- relative_position_index[0, 0:] = num_relative_distance - 3
- relative_position_index[0:, 0] = num_relative_distance - 2
- relative_position_index[0, 0] = num_relative_distance - 1
- return relative_position_index
-
-
-@register_model("unify_transformer")
-class TransformerModel(FairseqEncoderDecoderModel):
- """
- Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
- <https://arxiv.org/abs/1706.03762>`_.
-
- Args:
- encoder (TransformerEncoder): the encoder
- decoder (TransformerDecoder): the decoder
-
- The Transformer model provides the following named architectures and
- command-line arguments:
-
- .. argparse::
- :ref: fairseq.models.transformer_parser
- :prog:
- """
-
- def __init__(self, args, encoder, decoder):
- super().__init__(encoder, decoder)
- self.args = args
- self.supports_align_args = True
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--activation-fn',
- choices=utils.get_available_activation_fns(),
- help='activation function to use')
- parser.add_argument('--dropout', type=float, metavar='D',
- help='dropout probability')
- parser.add_argument('--attention-dropout', type=float, metavar='D',
- help='dropout probability for attention weights')
- parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
- help='dropout probability after activation in FFN.')
- parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained encoder embedding')
- parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension for FFN')
- parser.add_argument('--encoder-layers', type=int, metavar='N',
- help='num encoder layers')
- parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
- help='num encoder attention heads')
- parser.add_argument('--encoder-normalize-before', action='store_true',
- help='apply layernorm before each encoder block')
- parser.add_argument('--encoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the encoder')
- parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained decoder embedding')
- parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension for FFN')
- parser.add_argument('--decoder-layers', type=int, metavar='N',
- help='num decoder layers')
- parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
- help='num decoder attention heads')
- parser.add_argument('--decoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the decoder')
- parser.add_argument('--decoder-normalize-before', action='store_true',
- help='apply layernorm before each decoder block')
- parser.add_argument('--decoder-output-dim', type=int, metavar='N',
- help='decoder output dimension (extra linear layer '
- 'if different from decoder embed dim)')
- parser.add_argument('--share-decoder-input-output-embed', action='store_true',
- help='share decoder input and output embeddings')
- parser.add_argument('--share-all-embeddings', action='store_true',
- help='share encoder, decoder and output embeddings'
- ' (requires shared dictionary and embed dim)')
- parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
- help='if set, disables positional embeddings (outside self attention)')
- parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
- help='comma separated list of adaptive softmax cutoff points. '
- 'Must be used with adaptive_loss criterion')
- parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
- help='sets adaptive softmax dropout for the tail projections')
- parser.add_argument('--layernorm-embedding', action='store_true',
- help='add layernorm to embedding')
- parser.add_argument('--no-scale-embedding', action='store_true',
- help='if True, dont scale embeddings')
- parser.add_argument('--checkpoint-activations', action='store_true',
- help='checkpoint activations at each layer, which saves GPU '
- 'memory usage at the cost of some additional compute')
- parser.add_argument('--offload-activations', action='store_true',
- help='checkpoint activations at each layer, then offload them to CPU. Sets --checkpoint-activations.')
- # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
- parser.add_argument('--no-cross-attention', default=False, action='store_true',
- help='do not perform cross-attention')
- parser.add_argument('--cross-self-attention', default=False, action='store_true',
- help='perform cross+self-attention')
- # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
- parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
- help='LayerDrop probability for encoder')
- parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
- help='LayerDrop probability for decoder')
- parser.add_argument('--encoder-layers-to-keep', default=None,
- help='which layers to *keep* when pruning as a comma-separated list')
- parser.add_argument('--decoder-layers-to-keep', default=None,
- help='which layers to *keep* when pruning as a comma-separated list')
- # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
- parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
- help='iterative PQ quantization noise at training time')
- parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
- help='block size of quantization noise at training time')
- parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
- help='scalar quantization noise and scalar quantization at training time')
- # args for Fully Sharded Data Parallel (FSDP) training
- parser.add_argument(
- '--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
- help=(
- 'minimum number of params for a layer to be wrapped with FSDP() when '
- 'training with --ddp-backend=fully_sharded. Smaller values will '
- 'improve memory efficiency, but may make torch.distributed '
- 'communication less efficient due to smaller input sizes. This option '
- 'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
- '--offload-activations are passed.'
- )
- )
-
- parser.add_argument('--resnet-drop-path-rate', type=float,
- help='resnet drop path rate')
- parser.add_argument('--encoder-drop-path-rate', type=float,
- help='encoder drop path rate')
- parser.add_argument('--decoder-drop-path-rate', type=float,
- help='decoder drop path rate')
-
- parser.add_argument('--token-bucket-size', type=int,
- help='token bucket size')
- parser.add_argument('--image-bucket-size', type=int,
- help='image bucket size')
-
- parser.add_argument('--attn-scale-factor', type=float,
- help='attention scale factor')
- parser.add_argument('--freeze-resnet', action='store_true',
- help='freeze resnet')
- parser.add_argument('--freeze-encoder-embedding', action='store_true',
- help='freeze encoder token embedding')
- parser.add_argument('--freeze-decoder-embedding', action='store_true',
- help='freeze decoder token embedding')
- parser.add_argument('--add-type-embedding', action='store_true',
- help='add source/region/patch type embedding')
-
- parser.add_argument('--resnet-type', choices=['resnet50', 'resnet101', 'resnet152'],
- help='resnet type')
- parser.add_argument('--resnet-model-path', type=str, metavar='STR',
- help='path to load resnet')
- parser.add_argument('--code-image-size', type=int,
- help='code image size')
- parser.add_argument('--patch-layernorm-embedding', action='store_true',
- help='add layernorm to patch embedding')
- parser.add_argument('--code-layernorm-embedding', action='store_true',
- help='add layernorm to code embedding')
- parser.add_argument('--entangle-position-embedding', action='store_true',
- help='entangle position embedding')
- parser.add_argument('--disable-entangle', action='store_true',
- help='disable entangle')
- parser.add_argument('--sync-bn', action='store_true',
- help='sync batchnorm')
-
- parser.add_argument('--scale-attn', action='store_true',
- help='scale attn')
- parser.add_argument('--scale-fc', action='store_true',
- help='scale fc')
- parser.add_argument('--scale-heads', action='store_true',
- help='scale heads')
- parser.add_argument('--scale-resids', action='store_true',
- help='scale resids')
- # fmt: on
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present in older models
- base_architecture(args)
-
- if args.encoder_layers_to_keep:
- args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
- if args.decoder_layers_to_keep:
- args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
-
- if getattr(args, "max_source_positions", None) is None:
- args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
- if getattr(args, "max_target_positions", None) is None:
- args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
-
- if args.share_all_embeddings:
- if src_dict != tgt_dict:
- raise ValueError("--share-all-embeddings requires a joined dictionary")
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- encoder_embed_tokens = cls.build_embedding(
- args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = encoder_embed_tokens
- args.share_decoder_input_output_embed = True
- else:
- encoder_embed_tokens = cls.build_embedding(
- args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = cls.build_embedding(
- args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
- )
- if getattr(args, "freeze_encoder_embedding", False):
- encoder_embed_tokens.weight.requires_grad = False
- if getattr(args, "freeze_decoder_embedding", False):
- decoder_embed_tokens.weight.requires_grad = False
- if getattr(args, "offload_activations", False):
- args.checkpoint_activations = True # offloading implies checkpointing
- encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
- decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
- if not args.share_all_embeddings:
- min_params_to_wrap = getattr(
- args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
- )
- # fsdp_wrap is a no-op when --ddp-backend != fully_sharded
- encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
- decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
- return cls(args, encoder, decoder)
-
- @classmethod
- def build_embedding(cls, args, dictionary, embed_dim, path=None):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
-
- emb = Embedding(num_embeddings, embed_dim, padding_idx)
- # if provided, load from preloaded dictionaries
- if path:
- embed_dict = utils.parse_embedding(path)
- utils.load_embedding(embed_dict, dictionary, emb)
- return emb
-
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return TransformerEncoder(args, src_dict, embed_tokens)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- return TransformerDecoder(
- args,
- tgt_dict,
- embed_tokens,
- no_encoder_attn=getattr(args, "no_cross_attention", False),
- )
-
- # TorchScript doesn't support optional arguments with variable length (**kwargs).
- # The current workaround is to list the union of all arguments in child classes.
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- return_all_hiddens: bool = True,
- features_only: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- """
- Run the forward pass for an encoder-decoder model.
-
- Copied from the base class, but without ``**kwargs``,
- which are not supported by TorchScript.
- """
- encoder_out = self.encoder(
- src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
- )
- decoder_out = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- features_only=features_only,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- src_lengths=src_lengths,
- return_all_hiddens=return_all_hiddens,
- )
- return decoder_out
-
- # get_normalized_probs is defined on the (non-scriptable) FairseqModel base class,
- # so we override it here to call the scriptable helper from the base class.
- @torch.jit.export
- def get_normalized_probs(
- self,
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
- log_probs: bool,
- sample: Optional[Dict[str, Tensor]] = None,
- ):
- """Get normalized probabilities (or log probs) from a net's output."""
- return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
-
-
-class TransformerEncoder(FairseqEncoder):
- """
- Transformer encoder consisting of *args.encoder_layers* layers. Each layer
- is a :class:`TransformerEncoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): encoding dictionary
- embed_tokens (torch.nn.Embedding): input embedding
- """
-
- def __init__(self, args, dictionary, embed_tokens):
- self.args = args
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
-
- self.dropout_module = FairseqDropout(
- args.dropout, module_name=self.__class__.__name__
- )
- self.encoder_layerdrop = args.encoder_layerdrop
-
- embed_dim = embed_tokens.embedding_dim
- self.padding_idx = embed_tokens.padding_idx
- self.max_source_positions = args.max_source_positions
- self.num_attention_heads = args.encoder_attention_heads
-
- self.embed_tokens = embed_tokens
-
- self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
-
- if getattr(args, "layernorm_embedding", False):
- self.layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.layernorm_embedding = None
-
- if getattr(args, "add_type_embedding", False):
- self.type_embedding = Embedding(2, embed_dim, padding_idx=None)
- else:
- self.type_embedding = None
-
- if getattr(args, "sync_bn", False):
- norm_layer = BatchNorm2d
- else:
- norm_layer = None
-
- if args.resnet_type == 'resnet101':
- self.embed_images = ResNet([3, 4, 23], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
- elif args.resnet_type == 'resnet152':
- self.embed_images = ResNet([3, 8, 36], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
- elif args.resnet_type == 'resnet50':
- self.embed_images = ResNet([3, 4, 6], norm_layer=norm_layer, drop_path_rate=args.resnet_drop_path_rate)
- else:
- raise NotImplementedError
- self.image_proj = Linear(1024, embed_dim)
- if getattr(args, "resnet_model_path", None):
- print("load resnet {}".format(args.resnet_model_path))
- resnet_state_dict = torch.load(self.args.resnet_model_path)
- self.embed_images.load_state_dict(resnet_state_dict)
- if getattr(args, "patch_layernorm_embedding", False):
- self.patch_layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.patch_layernorm_embedding = None
-
- self.embed_positions = Embedding(args.max_source_positions + 2, embed_dim)
- self.embed_image_positions = Embedding(args.image_bucket_size ** 2 + 1, embed_dim)
- self.pos_ln = LayerNorm(embed_dim)
- self.image_pos_ln = LayerNorm(embed_dim)
- self.pos_scaling = float(embed_dim / args.encoder_attention_heads * args.attn_scale_factor) ** -0.5
- self.pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.pos_k_linear = nn.Linear(embed_dim, embed_dim)
-
- if not args.adaptive_input and args.quant_noise_pq > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(embed_dim, embed_dim, bias=False),
- args.quant_noise_pq,
- args.quant_noise_pq_block_size,
- )
- else:
- self.quant_noise = None
-
- if self.encoder_layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
- else:
- self.layers = nn.ModuleList([])
-
- dpr = [x.item() for x in torch.linspace(0, args.encoder_drop_path_rate, args.encoder_layers)]
- self.layers.extend(
- [self.build_encoder_layer(args, drop_path_rate=dpr[i]) for i in range(args.encoder_layers)]
- )
- self.num_layers = len(self.layers)
-
- if args.encoder_normalize_before:
- self.layer_norm = LayerNorm(embed_dim)
- else:
- self.layer_norm = None
-
- token_bucket_size = args.token_bucket_size
- token_num_rel_dis = 2 * token_bucket_size - 1
- token_rp_bucket = make_token_bucket_position(token_bucket_size)
- self.token_rel_pos_table_list = nn.ModuleList(
- [Embedding(token_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.encoder_layers)]
- )
-
- image_bucket_size = args.image_bucket_size
- image_num_rel_dis = (2 * image_bucket_size - 1) * (2 * image_bucket_size - 1) + 3
- image_rp_bucket = make_image_bucket_position(image_bucket_size, image_num_rel_dis)
- self.image_rel_pos_table_list = nn.ModuleList(
- [Embedding(image_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.encoder_layers)]
- )
-
- self.register_buffer("token_rp_bucket", token_rp_bucket)
- self.register_buffer("image_rp_bucket", image_rp_bucket)
- self.entangle_position_embedding = args.entangle_position_embedding
-
- def train(self, mode=True):
- super(TransformerEncoder, self).train(mode)
- if getattr(self.args, "freeze_resnet", False):
- for m in self.embed_images.modules():
- if isinstance(m, nn.BatchNorm2d):
- m.eval()
- m.weight.requires_grad = False
- m.bias.requires_grad = False
-
- def build_encoder_layer(self, args, drop_path_rate=0.0):
- layer = TransformerEncoderLayer(args, drop_path_rate=drop_path_rate)
- checkpoint = getattr(args, "checkpoint_activations", False)
- if checkpoint:
- offload_to_cpu = getattr(args, "offload_activations", False)
- layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
- # if we are checkpointing, enforce that FSDP always wraps the
- # checkpointed layer, regardless of layer size
- min_params_to_wrap = (
- getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
- if not checkpoint else 0
- )
- layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
- return layer
-
- def get_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- rp_bucket = self.token_rp_bucket[:seq_len, :seq_len]
- values = F.embedding(rp_bucket, self.token_rel_pos_table_list[idx].weight)
- values = values.unsqueeze(0).expand(x.size(0), -1, -1, -1)
- values = values.permute([0, 3, 1, 2])
- return values.contiguous()
-
- def get_image_rel_pos_bias(self, image_position_ids, idx):
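- # Gather a per-example relative-position bucket table for the sampled patch
- # positions, then look up per-head bias values from this layer's embedding table.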
- bsz, seq_len = image_position_ids.shape
- rp_bucket_size = self.image_rp_bucket.size(1)
-
- rp_bucket = self.image_rp_bucket.unsqueeze(0).expand(
- bsz, rp_bucket_size, rp_bucket_size
- ).gather(1, image_position_ids[:, :, None].expand(bsz, seq_len, rp_bucket_size)
- ).gather(2, image_position_ids[:, None, :].expand(bsz, seq_len, seq_len))
- values = F.embedding(rp_bucket, self.image_rel_pos_table_list[idx].weight)
- values = values.permute(0, 3, 1, 2)
- return values
-
- def get_patch_images_info(self, patch_images, sample_patch_num, device):
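- # Encode patch images with the ResNet backbone, flatten the H x W feature map into a
- # sequence of patch embeddings with per-patch position ids and position embeddings,
- # and optionally keep only sample_patch_num randomly sampled patches.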
- image_embed = self.embed_images(patch_images)
- h, w = image_embed.shape[-2:]
- image_num_patches = h * w
- image_padding_mask = patch_images.new_zeros((patch_images.size(0), image_num_patches)).bool()
- image_position_idx = torch.arange(w).unsqueeze(0).expand(h, w) + \
- torch.arange(h).unsqueeze(1) * self.args.image_bucket_size + 1
- image_position_idx = image_position_idx.view(-1).to(device)
- image_position_ids = image_position_idx[None, :].expand(patch_images.size(0), image_num_patches)
-
- image_embed = image_embed.flatten(2).transpose(1, 2)
- if sample_patch_num is not None:
- patch_orders = [
- random.sample(range(image_num_patches), k=sample_patch_num)
- for _ in range(patch_images.size(0))
- ]
- patch_orders = torch.LongTensor(patch_orders).to(device)
- image_embed = image_embed.gather(
- 1, patch_orders.unsqueeze(2).expand(-1, -1, image_embed.size(2))
- )
- image_num_patches = sample_patch_num
- image_padding_mask = image_padding_mask.gather(1, patch_orders)
- image_position_ids = image_position_ids.gather(1, patch_orders)
- image_pos_embed = self.embed_image_positions(image_position_ids)
-
- return image_embed, image_num_patches, image_padding_mask, image_position_ids, image_pos_embed
-
- def forward_embedding(
- self,
- src_tokens,
- image_embed: Optional[torch.Tensor] = None,
- image_embed_2: Optional[torch.Tensor] = None,
- token_embedding: Optional[torch.Tensor] = None,
- pos_embed: Optional[torch.Tensor] = None,
- image_pos_embed: Optional[torch.Tensor] = None,
- image_pos_embed_2: Optional[torch.Tensor] = None
- ):
- # embed tokens and positions
- if token_embedding is None:
- token_embedding = self.embed_tokens(src_tokens)
- x = embed = self.embed_scale * token_embedding
- if self.entangle_position_embedding and pos_embed is not None:
- x += pos_embed
- if self.type_embedding is not None:
- x += self.type_embedding(src_tokens.new_zeros(x.size()[:2]))
- if self.layernorm_embedding is not None:
- x = self.layernorm_embedding(x)
- x = self.dropout_module(x)
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- # embed raw images
- if image_embed is not None:
- image_embed = self.image_proj(image_embed)
- image_x = image_embed = self.embed_scale * image_embed
- if self.entangle_position_embedding and image_pos_embed is not None:
- image_x += image_pos_embed
- if self.type_embedding is not None:
- image_x += self.type_embedding(src_tokens.new_ones(image_x.size()[:2]))
- if self.patch_layernorm_embedding is not None:
- image_x = self.patch_layernorm_embedding(image_x)
- image_x = self.dropout_module(image_x)
- if self.quant_noise is not None:
- image_x = self.quant_noise(image_x)
- x = torch.cat([image_x, x], dim=1)
- embed = torch.cat([image_embed, embed], dim=1)
-
- if image_embed_2 is not None:
- assert self.type_embedding is not None
- image_embed_2 = self.image_proj(image_embed_2)
- image_x_2 = image_embed_2 = self.embed_scale * image_embed_2
- if self.entangle_position_embedding and image_pos_embed_2 is not None:
- image_x_2 += image_pos_embed_2
- if self.type_embedding is not None:
- image_x_2 += self.type_embedding(src_tokens.new_full(image_x_2.size()[:2], fill_value=2))
- if self.patch_layernorm_embedding is not None:
- image_x_2 = self.patch_layernorm_embedding(image_x_2)
- image_x_2 = self.dropout_module(image_x_2)
- if self.quant_noise is not None:
- image_x_2 = self.quant_noise(image_x_2)
- x = torch.cat([image_x_2, x], dim=1)
- embed = torch.cat([image_embed_2, embed], dim=1)
-
- return x, embed
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- patch_images: Optional[torch.Tensor] = None,
- patch_images_2: Optional[torch.Tensor] = None,
- patch_masks: Optional[torch.Tensor] = None,
- code_masks: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- sample_patch_num: Optional[int] = None
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
- token_embeddings (torch.Tensor, optional): precomputed embeddings;
- if `None` (default), embeddings are recomputed
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- return self.forward_scriptable(src_tokens,
- src_lengths,
- patch_images,
- patch_images_2,
- patch_masks,
- return_all_hiddens,
- token_embeddings,
- sample_patch_num)
-
- # TorchScript doesn't support calls to super(), so the scriptable subclass
- # can't access the base class implementation in TorchScript.
- # The current workaround is to add a helper function with a different name
- # and call that helper from the scriptable subclass.
- def forward_scriptable(
- self,
- src_tokens,
- src_lengths,
- patch_images: Optional[torch.Tensor] = None,
- patch_images_2: Optional[torch.Tensor] = None,
- patch_masks: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[torch.Tensor] = None,
- sample_patch_num: Optional[int] = None
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
- token_embeddings (torch.Tensor, optional): precomputed embeddings;
- if `None` (default), embeddings are recomputed
-
- Returns:
- dict:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- """
- image_embed = None
- image_embed_2 = None
- image_pos_embed = None
- image_pos_embed_2 = None
- if patch_images is not None:
- image_embed, image_num_patches, image_padding_mask, image_position_ids, image_pos_embed = \
- self.get_patch_images_info(patch_images, sample_patch_num, src_tokens.device)
- image_padding_mask[~patch_masks] = True
- if patch_images_2 is not None:
- image_embed_2, image_num_patches_2, image_padding_mask_2, image_position_ids_2, image_pos_embed_2 = \
- self.get_patch_images_info(patch_images_2, sample_patch_num, src_tokens.device)
- image_padding_mask_2[~patch_masks] = True
-
- encoder_padding_mask = src_tokens.eq(self.padding_idx)
- if patch_images is not None:
- encoder_padding_mask = torch.cat([image_padding_mask, encoder_padding_mask], dim=1)
- if patch_images_2 is not None:
- encoder_padding_mask = torch.cat([image_padding_mask_2, encoder_padding_mask], dim=1)
- has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
-
- pos_embed = self.embed_positions(utils.new_arange(src_tokens))
- x, encoder_embedding = self.forward_embedding(
- src_tokens, image_embed, image_embed_2, token_embeddings,
- pos_embed, image_pos_embed, image_pos_embed_2
- )
-
- # account for padding while computing the representation
- if has_pads:
- x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- pos_embed = self.pos_ln(pos_embed)
- if patch_images is not None:
- image_pos_embed = self.image_pos_ln(image_pos_embed)
- pos_embed = torch.cat([image_pos_embed, pos_embed], dim=1)
- if patch_images_2 is not None:
- image_pos_embed_2 = self.image_pos_ln(image_pos_embed_2)
- pos_embed = torch.cat([image_pos_embed_2, pos_embed], dim=1)
-
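- # Absolute position bias: project the layer-normalized positional embeddings into
- # per-head queries and keys and take their scaled dot product; the result is added
- # to every encoder layer's self-attention logits below.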
- pos_q = self.pos_q_linear(pos_embed).view(
- x.size(1), x.size(0), self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.pos_k_linear(pos_embed).view(
- x.size(1), x.size(0), self.num_attention_heads, -1
- ).transpose(1, 2)
- abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3))
-
- encoder_states = []
-
- if return_all_hiddens:
- encoder_states.append(x)
-
- # encoder layers
- for idx, layer in enumerate(self.layers):
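- # Each layer adds its own relative-position bias (for the text block and, when
- # patch images are present, the image blocks) on top of the shared absolute bias.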
- self_attn_bias = abs_pos_bias.clone()
- self_attn_bias[:, :, -src_tokens.size(1):, -src_tokens.size(1):] += self.get_rel_pos_bias(src_tokens, idx)
- if patch_images_2 is not None:
- self_attn_bias[:, :, :image_num_patches_2, :image_num_patches_2] += \
- self.get_image_rel_pos_bias(image_position_ids_2, idx)
- self_attn_bias[:, :, image_num_patches_2:image_num_patches_2+image_num_patches, image_num_patches_2:image_num_patches_2+image_num_patches] += \
- self.get_image_rel_pos_bias(image_position_ids, idx)
- elif patch_images is not None:
- self_attn_bias[:, :, :x.size(0) - src_tokens.size(1), :x.size(0) - src_tokens.size(1)] += \
- self.get_image_rel_pos_bias(image_position_ids, idx)
- self_attn_bias = self_attn_bias.reshape(-1, x.size(0), x.size(0))
-
- x = layer(
- x, encoder_padding_mask=encoder_padding_mask if has_pads else None, self_attn_bias=self_attn_bias
- )
- if return_all_hiddens:
- assert encoder_states is not None
- encoder_states.append(x)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
- # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
- # `forward` so we use a dictionary instead.
- # TorchScript does not support mixed values so the values are all lists.
- # The empty list is equivalent to None.
- return {
- "encoder_out": [x], # T x B x C
- "encoder_padding_mask": [encoder_padding_mask], # B x T
- "encoder_embedding": [], # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": [],
- "src_lengths": [],
- "position_embeddings": [pos_embed], # B x T x C
- }
-
- @torch.jit.export
- def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
- """
- Reorder encoder output according to *new_order*.
-
- Args:
- encoder_out: output from the ``forward()`` method
- new_order (LongTensor): desired order
-
- Returns:
- *encoder_out* rearranged according to *new_order*
- """
- if len(encoder_out["encoder_out"]) == 0:
- new_encoder_out = []
- else:
- new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
- if len(encoder_out["encoder_padding_mask"]) == 0:
- new_encoder_padding_mask = []
- else:
- new_encoder_padding_mask = [
- encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
- ]
- if len(encoder_out["encoder_embedding"]) == 0:
- new_encoder_embedding = []
- else:
- new_encoder_embedding = [
- encoder_out["encoder_embedding"][0].index_select(0, new_order)
- ]
-
- if len(encoder_out["src_tokens"]) == 0:
- new_src_tokens = []
- else:
- new_src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
-
- if len(encoder_out["src_lengths"]) == 0:
- new_src_lengths = []
- else:
- new_src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
-
- if len(encoder_out["position_embeddings"]) == 0:
- new_position_embeddings = []
- else:
- new_position_embeddings = [(encoder_out["position_embeddings"][0]).index_select(0, new_order)]
-
- encoder_states = encoder_out["encoder_states"]
- if len(encoder_states) > 0:
- for idx, state in enumerate(encoder_states):
- encoder_states[idx] = state.index_select(1, new_order)
-
- return {
- "encoder_out": new_encoder_out, # T x B x C
- "encoder_padding_mask": new_encoder_padding_mask, # B x T
- "encoder_embedding": new_encoder_embedding, # B x T x C
- "encoder_states": encoder_states, # List[T x B x C]
- "src_tokens": new_src_tokens, # B x T
- "src_lengths": new_src_lengths, # B x 1
- "position_embeddings": new_position_embeddings, # B x T x C
- }
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- if self.embed_positions is None:
- return self.max_source_positions
- return self.max_source_positions
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- print("deleting {0}".format(weights_key))
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
- for i in range(self.num_layers):
- # update layer norms
- self.layers[i].upgrade_state_dict_named(
- state_dict, "{}.layers.{}".format(name, i)
- )
-
- # version_key = "{}.version".format(name)
- # if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
- # # earlier checkpoints did not normalize after the stack of layers
- # self.layer_norm = None
- # self.normalize = False
- # state_dict[version_key] = torch.Tensor([1])
-
- prefix = name + "." if name != "" else ""
- for param_name, param_tensor in self.state_dict().items():
- if (prefix + param_name) not in state_dict:
- state_dict[prefix + param_name] = self.state_dict()[param_name]
-
- if len(state_dict["encoder.embed_image_positions.weight"]) < len(self.state_dict()["embed_image_positions.weight"]):
- num_posids_to_add = len(self.state_dict()["embed_image_positions.weight"]) - len(state_dict["encoder.embed_image_positions.weight"])
- embed_dim = state_dict["encoder.embed_image_positions.weight"].size(1)
- new_pos_embed_to_add = torch.zeros(num_posids_to_add, embed_dim)
- nn.init.normal_(new_pos_embed_to_add, mean=0, std=embed_dim ** -0.5)
- new_pos_embed_to_add = new_pos_embed_to_add.to(
- dtype=state_dict["encoder.embed_image_positions.weight"].dtype,
- )
- state_dict["encoder.embed_image_positions.weight"] = torch.cat(
- [state_dict["encoder.embed_image_positions.weight"], new_pos_embed_to_add]
- )
- return state_dict
-
-
-class TransformerDecoder(FairseqIncrementalDecoder):
- """
- Transformer decoder consisting of *args.decoder_layers* layers. Each layer
- is a :class:`TransformerDecoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): decoding dictionary
- embed_tokens (torch.nn.Embedding): output embedding
- no_encoder_attn (bool, optional): whether to attend to encoder outputs
- (default: False).
- """
-
- def __init__(
- self,
- args,
- dictionary,
- embed_tokens,
- no_encoder_attn=False,
- output_projection=None,
- ):
- self.args = args
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
- self._future_mask = torch.empty(0)
-
- self.dropout_module = FairseqDropout(
- args.dropout, module_name=self.__class__.__name__
- )
- self.decoder_layerdrop = args.decoder_layerdrop
- self.share_input_output_embed = args.share_decoder_input_output_embed
- self.num_attention_heads = args.decoder_attention_heads
-
- input_embed_dim = embed_tokens.embedding_dim
- embed_dim = args.decoder_embed_dim
- self.embed_dim = embed_dim
- self.output_embed_dim = args.decoder_output_dim
-
- self.padding_idx = embed_tokens.padding_idx
- self.max_target_positions = args.max_target_positions
-
- self.embed_tokens = embed_tokens
-
- self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
-
- if not args.adaptive_input and args.quant_noise_pq > 0:
- self.quant_noise = apply_quant_noise_(
- nn.Linear(embed_dim, embed_dim, bias=False),
- args.quant_noise_pq,
- args.quant_noise_pq_block_size,
- )
- else:
- self.quant_noise = None
-
- self.project_in_dim = (
- Linear(input_embed_dim, embed_dim, bias=False)
- if embed_dim != input_embed_dim
- else None
- )
-
- if getattr(args, "layernorm_embedding", False):
- self.layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.layernorm_embedding = None
-
- self.window_size = args.code_image_size // 8
-
- self.embed_positions = Embedding(args.max_target_positions + 2, embed_dim)
- self.embed_image_positions = Embedding(args.image_bucket_size ** 2 + 1, embed_dim)
- self.pos_ln = LayerNorm(embed_dim)
- self.image_pos_ln = LayerNorm(embed_dim)
- self.pos_scaling = float(embed_dim / self.num_attention_heads * args.attn_scale_factor) ** -0.5
- self.self_pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.self_pos_k_linear = nn.Linear(embed_dim, embed_dim)
- self.cross_pos_q_linear = nn.Linear(embed_dim, embed_dim)
- self.cross_pos_k_linear = nn.Linear(embed_dim, embed_dim)
-
- if getattr(args, "code_layernorm_embedding", False):
- self.code_layernorm_embedding = LayerNorm(embed_dim)
- else:
- self.code_layernorm_embedding = None
-
- self.cross_self_attention = getattr(args, "cross_self_attention", False)
-
- if self.decoder_layerdrop > 0.0:
- self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
- else:
- self.layers = nn.ModuleList([])
-
- dpr = [x.item() for x in torch.linspace(0, args.decoder_drop_path_rate, args.decoder_layers)]
- self.layers.extend(
- [
- self.build_decoder_layer(args, no_encoder_attn, drop_path_rate=dpr[i])
- for i in range(args.decoder_layers)
- ]
- )
- self.num_layers = len(self.layers)
-
- if args.decoder_normalize_before:
- self.layer_norm = LayerNorm(embed_dim)
- else:
- self.layer_norm = None
-
- self.project_out_dim = (
- Linear(embed_dim, self.output_embed_dim, bias=False)
- if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
- else None
- )
-
- self.adaptive_softmax = None
- self.output_projection = output_projection
- if self.output_projection is None:
- self.build_output_projection(args, dictionary, embed_tokens)
-
- token_bucket_size = args.token_bucket_size
- token_num_rel_dis = 2 * token_bucket_size - 1
- token_rp_bucket = make_token_bucket_position(token_bucket_size)
- self.token_rel_pos_table_list = nn.ModuleList(
- [Embedding(token_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.decoder_layers)]
- )
-
- image_bucket_size = args.image_bucket_size
- image_num_rel_dis = (2 * image_bucket_size - 1) * (2 * image_bucket_size - 1) + 3
- image_rp_bucket = make_image_bucket_position(image_bucket_size, image_num_rel_dis)
- image_position_idx = torch.arange(self.window_size).unsqueeze(0).expand(self.window_size, self.window_size) + \
- torch.arange(self.window_size).unsqueeze(1) * image_bucket_size + 1
- image_position_idx = torch.cat([torch.tensor([0]), image_position_idx.view(-1)])
- image_position_idx = torch.cat([image_position_idx, torch.tensor([1024] * 768)])
- self.image_rel_pos_table_list = nn.ModuleList(
- [Embedding(image_num_rel_dis, self.num_attention_heads, zero_init=True) for _ in range(args.decoder_layers)]
- )
-
- self.register_buffer("token_rp_bucket", token_rp_bucket)
- self.register_buffer("image_rp_bucket", image_rp_bucket)
- self.register_buffer("image_position_idx", image_position_idx)
- self.entangle_position_embedding = args.entangle_position_embedding
-
- def build_output_projection(self, args, dictionary, embed_tokens):
- if args.adaptive_softmax_cutoff is not None:
- self.adaptive_softmax = AdaptiveSoftmax(
- len(dictionary),
- self.output_embed_dim,
- utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
- dropout=args.adaptive_softmax_dropout,
- adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
- factor=args.adaptive_softmax_factor,
- tie_proj=args.tie_adaptive_proj,
- )
- elif self.share_input_output_embed:
- self.output_projection = nn.Linear(
- self.embed_tokens.weight.shape[1],
- self.embed_tokens.weight.shape[0],
- bias=False,
- )
- self.output_projection.weight = self.embed_tokens.weight
- else:
- self.output_projection = nn.Linear(
- self.output_embed_dim, len(dictionary), bias=False
- )
- nn.init.normal_(
- self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
- )
- num_base_layers = getattr(args, "base_layers", 0)
- for i in range(num_base_layers):
- self.layers.insert(((i+1) * args.decoder_layers) // (num_base_layers + 1), BaseLayer(args))
-
- def build_decoder_layer(self, args, no_encoder_attn=False, drop_path_rate=0.0):
- layer = TransformerDecoderLayer(args, no_encoder_attn, drop_path_rate=drop_path_rate)
- checkpoint = getattr(args, "checkpoint_activations", False)
- if checkpoint:
- offload_to_cpu = getattr(args, "offload_activations", False)
- layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
- # if we are checkpointing, enforce that FSDP always wraps the
- # checkpointed layer, regardless of layer size
- min_params_to_wrap = (
- getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
- if not checkpoint else 0
- )
- layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
- return layer
-
- def get_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- rp_bucket = self.token_rp_bucket[:seq_len, :seq_len]
- values = F.embedding(rp_bucket, self.token_rel_pos_table_list[idx].weight)
- values = values.permute([2, 0, 1])
- return values.contiguous()
-
- def get_image_rel_pos_bias(self, x, idx):
- seq_len = x.size(1)
- image_position_idx = self.image_position_idx[:seq_len]
- rp_bucket = self.image_rp_bucket[image_position_idx][:, image_position_idx]
- values = F.embedding(rp_bucket, self.image_rel_pos_table_list[idx].weight)
- values = values.permute(2, 0, 1)
- return values
-
- def get_pos_info(self, tokens, tgt_pos_embed, src_pos_embed=None, use_image=False):
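- # Compute an absolute position attention bias: self-attention pairs target position
- # queries with target position keys, while cross-attention pairs target position
- # queries with encoder position keys (src_pos_embed).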
- batch_size = tokens.size(0)
- tgt_len = tokens.size(1)
- tgt_pos_embed = self.image_pos_ln(tgt_pos_embed) if use_image else self.pos_ln(tgt_pos_embed)
- if src_pos_embed is not None:
- src_len = src_pos_embed.size(1)
- pos_q = self.cross_pos_q_linear(tgt_pos_embed).view(
- batch_size, tgt_len, self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.cross_pos_k_linear(src_pos_embed).view(
- batch_size, src_len, self.num_attention_heads, -1
- ).transpose(1, 2)
- else:
- src_len = tgt_pos_embed.size(1)
- pos_q = self.self_pos_q_linear(tgt_pos_embed).view(
- batch_size, tgt_len, self.num_attention_heads, -1
- ).transpose(1, 2) * self.pos_scaling
- pos_k = self.self_pos_k_linear(tgt_pos_embed).view(
- batch_size, src_len, self.num_attention_heads, -1
- ).transpose(1, 2)
- abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3))
- return abs_pos_bias
-
- def forward(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor] = None,
- encoder_out: Optional[Dict[str, List[Tensor]]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- features_only: bool = False,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- src_lengths: Optional[Any] = None,
- return_all_hiddens: bool = False,
- ):
- """
- Args:
- prev_output_tokens (LongTensor): previous decoder outputs of shape
- `(batch, tgt_len)`, for teacher forcing
- encoder_out (optional): output from the encoder, used for
- encoder-side attention, should be of size T x B x C
- incremental_state (dict): dictionary used for storing state during
- :ref:`Incremental decoding`
- features_only (bool, optional): only return features without
- applying output layer (default: False).
- full_context_alignment (bool, optional): don't apply
- auto-regressive mask to self-attention (default: False).
-
- Returns:
- tuple:
- - the decoder's output of shape `(batch, tgt_len, vocab)`
- - a dictionary with any model-specific outputs
- """
-
- x, extra = self.extract_features(
- prev_output_tokens,
- code_masks=code_masks,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- full_context_alignment=full_context_alignment,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- )
-
- if not features_only:
- x = self.output_layer(x)
- return x, extra
-
- def extract_features(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor],
- encoder_out: Optional[Dict[str, List[Tensor]]],
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- return self.extract_features_scriptable(
- prev_output_tokens,
- code_masks,
- encoder_out,
- incremental_state,
- full_context_alignment,
- alignment_layer,
- alignment_heads,
- )
-
- """
- A scriptable subclass of this class has an extract_features method and calls
- super().extract_features, but super() is not supported in torchscript. A copy of
- this function is made to be used in the subclass instead.
- """
-
- def extract_features_scriptable(
- self,
- prev_output_tokens,
- code_masks: Optional[torch.Tensor],
- encoder_out: Optional[Dict[str, List[Tensor]]],
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- full_context_alignment: bool = False,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- """
- Similar to *forward* but only return features.
-
- Includes several features from "Jointly Learning to Align and
- Translate with Transformer Models" (Garg et al., EMNLP 2019).
-
- Args:
- full_context_alignment (bool, optional): don't apply
- auto-regressive mask to self-attention (default: False).
- alignment_layer (int, optional): return mean alignment over
- heads at this layer (default: last layer).
- alignment_heads (int, optional): only average alignment over
- this many heads (default: all heads).
-
- Returns:
- tuple:
- - the decoder's features of shape `(batch, tgt_len, embed_dim)`
- - a dictionary with any model-specific outputs
- """
- bs, slen = prev_output_tokens.size()
- if alignment_layer is None:
- alignment_layer = self.num_layers - 1
-
- enc: Optional[Tensor] = None
- padding_mask: Optional[Tensor] = None
- if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
- enc = encoder_out["encoder_out"][0]
- assert (
- enc.size()[1] == bs
- ), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
- if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
- padding_mask = encoder_out["encoder_padding_mask"][0]
-
- bsz, tgt_len = prev_output_tokens.shape
- token_position_idx = utils.new_arange(prev_output_tokens)
- tgt_pos_embed = self.embed_positions(token_position_idx)
- if code_masks is not None and torch.any(code_masks):
- image_position_idx = self.image_position_idx[:prev_output_tokens.size(1)].unsqueeze(0).expand(bsz, tgt_len)
- tgt_pos_embed[code_masks] = self.embed_image_positions(image_position_idx)[code_masks]
-
- # self attn position bias
- self_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, use_image=False)
- if code_masks is not None and torch.any(code_masks):
- self_image_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, use_image=True)
- self_abs_pos_bias[code_masks] = self_image_abs_pos_bias[code_masks]
- # cross attn position bias
- src_pos_embed = encoder_out['position_embeddings'][0]
- cross_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, src_pos_embed=src_pos_embed)
- if code_masks is not None and torch.any(code_masks):
- cross_image_abs_pos_bias = self.get_pos_info(prev_output_tokens, tgt_pos_embed, src_pos_embed=src_pos_embed, use_image=True)
- cross_abs_pos_bias[code_masks] = cross_image_abs_pos_bias[code_masks]
- cross_abs_pos_bias = cross_abs_pos_bias.reshape(-1, *cross_abs_pos_bias.size()[-2:])
-
- all_prev_output_tokens = prev_output_tokens.clone()
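- # During incremental decoding only the newest token is fed through the layers;
- # keep the full prefix around for the relative-position bias lookups.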
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:]
- cross_abs_pos_bias = cross_abs_pos_bias[:, -1:, :]
- tgt_pos_embed = tgt_pos_embed[:, -1:, :]
-
- # embed tokens and positions
- x = self.embed_scale * self.embed_tokens(prev_output_tokens)
-
- if self.quant_noise is not None:
- x = self.quant_noise(x)
-
- if self.project_in_dim is not None:
- x = self.project_in_dim(x)
-
- if self.entangle_position_embedding is not None and not self.args.disable_entangle:
- x += tgt_pos_embed
-
- if self.layernorm_embedding is not None:
- if code_masks is None or not code_masks.any() or not getattr(self, "code_layernorm_embedding", False):
- x = self.layernorm_embedding(x)
- elif code_masks is not None and code_masks.all():
- x = self.code_layernorm_embedding(x)
- else:
- x[~code_masks] = self.layernorm_embedding(x[~code_masks])
- x[code_masks] = self.code_layernorm_embedding(x[code_masks])
-
- x = self.dropout_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- self_attn_padding_mask: Optional[Tensor] = None
- if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
- self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
-
- # decoder layers
- attn: Optional[Tensor] = None
- inner_states: List[Optional[Tensor]] = [x]
- for idx, layer in enumerate(self.layers):
- if incremental_state is None and not full_context_alignment:
- self_attn_mask = self.buffered_future_mask(x)
- else:
- self_attn_mask = None
-
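- # Combine the absolute position bias with this layer's relative-position bias;
- # positions flagged by code_masks (image code tokens) use the image table.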
- self_attn_bias = self_abs_pos_bias.clone()
- if code_masks is None or not code_masks.any():
- self_attn_bias += self.get_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- elif code_masks is not None and code_masks.all():
- self_attn_bias += self.get_image_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- else:
- self_attn_bias[~code_masks] += self.get_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- self_attn_bias[code_masks] += self.get_image_rel_pos_bias(all_prev_output_tokens, idx).unsqueeze(0)
- self_attn_bias = self_attn_bias.reshape(-1, *self_attn_bias.size()[-2:])
- if incremental_state is not None:
- self_attn_bias = self_attn_bias[:, -1:, :]
-
- x, layer_attn, _ = layer(
- x,
- enc,
- padding_mask,
- incremental_state,
- self_attn_mask=self_attn_mask,
- self_attn_padding_mask=self_attn_padding_mask,
- need_attn=bool((idx == alignment_layer)),
- need_head_weights=bool((idx == alignment_layer)),
- self_attn_bias=self_attn_bias,
- cross_attn_bias=cross_abs_pos_bias
- )
- inner_states.append(x)
- if layer_attn is not None and idx == alignment_layer:
- attn = layer_attn.float().to(x)
-
- if attn is not None:
- if alignment_heads is not None:
- attn = attn[:alignment_heads]
-
- # average probabilities over heads
- attn = attn.mean(dim=0)
-
- if self.layer_norm is not None:
- x = self.layer_norm(x)
-
- # T x B x C -> B x T x C
- x = x.transpose(0, 1)
-
- if self.project_out_dim is not None:
- x = self.project_out_dim(x)
-
- return x, {"attn": [attn], "inner_states": inner_states}
-
- def output_layer(self, features):
- """Project features to the vocabulary size."""
- if self.adaptive_softmax is None:
- # project back to size of vocabulary
- return self.output_projection(features)
- else:
- return features
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- if self.embed_positions is None:
- return self.max_target_positions
- return self.max_target_positions
-
- def buffered_future_mask(self, tensor):
- dim = tensor.size(0)
- # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
- if (
- self._future_mask.size(0) == 0
- or (not self._future_mask.device == tensor.device)
- or self._future_mask.size(0) < dim
- ):
- self._future_mask = torch.triu(
- utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
- )
- self._future_mask = self._future_mask.to(tensor)
- return self._future_mask[:dim, :dim]
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
-
- if f"{name}.output_projection.weight" not in state_dict:
- if self.share_input_output_embed:
- embed_out_key = f"{name}.embed_tokens.weight"
- else:
- embed_out_key = f"{name}.embed_out"
- if embed_out_key in state_dict:
- state_dict[f"{name}.output_projection.weight"] = state_dict[
- embed_out_key
- ]
- if not self.share_input_output_embed:
- del state_dict[embed_out_key]
-
- for i in range(self.num_layers):
- # update layer norms
- self.layers[i].upgrade_state_dict_named(
- state_dict, "{}.layers.{}".format(name, i)
- )
-
- # version_key = "{}.version".format(name)
- # if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
- # # earlier checkpoints did not normalize after the stack of layers
- # self.layer_norm = None
- # self.normalize = False
- # state_dict[version_key] = torch.Tensor([1])
-
- prefix = name + "." if name != "" else ""
- image_params = ["image_position_idx"]
- for image_param in image_params:
- state_dict[prefix + image_param] = self.state_dict()[image_param]
- for param_name, param_tensor in self.state_dict().items():
- if (prefix + param_name) not in state_dict:
- state_dict[prefix + param_name] = self.state_dict()[param_name]
-
- if len(state_dict["decoder.embed_image_positions.weight"]) < len(self.state_dict()["embed_image_positions.weight"]):
- num_posids_to_add = len(self.state_dict()["embed_image_positions.weight"]) - len(state_dict["decoder.embed_image_positions.weight"])
- embed_dim = state_dict["decoder.embed_image_positions.weight"].size(1)
- new_pos_embed_to_add = torch.zeros(num_posids_to_add, embed_dim)
- nn.init.normal_(new_pos_embed_to_add, mean=0, std=embed_dim ** -0.5)
- new_pos_embed_to_add = new_pos_embed_to_add.to(
- dtype=state_dict["decoder.embed_image_positions.weight"].dtype,
- )
- state_dict["decoder.embed_image_positions.weight"] = torch.cat(
- [state_dict["decoder.embed_image_positions.weight"], new_pos_embed_to_add]
- )
- return state_dict
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx=None, zero_init=False):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- if padding_idx is not None:
- nn.init.constant_(m.weight[padding_idx], 0)
- if zero_init:
- nn.init.constant_(m.weight, 0)
- return m
-
-
-def Linear(in_features, out_features, bias=True):
- m = nn.Linear(in_features, out_features, bias)
- nn.init.xavier_uniform_(m.weight)
- if bias:
- nn.init.constant_(m.bias, 0.0)
- return m
-
-
-@register_model_architecture("unify_transformer", "unify_transformer")
-def base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
-    :return: list of random RGB colors and the corresponding matplotlib colormap
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.no_cross_attention = getattr(args, "no_cross_attention", False)
- args.cross_self_attention = getattr(args, "cross_self_attention", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
- args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
- args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
- args.offload_activations = getattr(args, "offload_activations", False)
- if args.offload_activations:
- args.checkpoint_activations = True
- args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
- args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
- args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
- args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
- args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
- args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
- args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
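
Note on the decoder code above: buffered_future_mask caches an upper-triangular matrix of -inf that is added to the self-attention scores so each target position can only attend to itself and earlier positions. A minimal standalone sketch of the same idea in plain PyTorch (the helper name is ours, not from the file):

import torch

def future_mask(length: int) -> torch.Tensor:
    # -inf strictly above the diagonal, 0 elsewhere; adding this to attention
    # scores blocks every position from attending to later positions.
    return torch.triu(torch.full((length, length), float("-inf")), diagonal=1)

scores = torch.randn(4, 4) + future_mask(4)
probs = scores.softmax(dim=-1)  # each row attends only to itself and earlier steps
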
diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/t2s_gradio.py b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/t2s_gradio.py
deleted file mode 100644
index bd9acbe68761759ff259f4476bb3df57a75c78ff..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/t2s_gradio.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import gradio as gr
-from texttospeech import TextToMel, MelToWav
-
-text_to_mel = TextToMel(
- glow_model_dir="/path/to/glow-tts/checkpoint/dir", device="cuda"
-)
-mel_to_wav = MelToWav(hifi_model_dir="/path/to/glow-tts/checkpoint/dir", device="cuda")
-
-
-def run_tts(text):
- mel = text_to_mel.generate_mel(text)
- audio, sr = mel_to_wav.generate_wav(mel)
- return (sr, audio)
-
-
-# text = " सीआईएसएफ में उप-निरीक्षक महावीर प्रसाद गोदरा को मरणोपरांत 'शौर्य चक्र' से सम्मानित किया गया। "
-# run_tts(text)
-
-textbox = gr.inputs.Textbox(
- placeholder="Enter Telugu text here", default="", label="TTS"
-)
-op = gr.outputs.Audio(type="numpy", label=None)
-iface = gr.Interface(fn=run_tts, inputs=textbox, outputs=op)
-iface.launch(share=True)
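
The deleted t2s_gradio.py relies on the legacy gr.inputs / gr.outputs component API, which later Gradio releases dropped. A rough sketch of the equivalent wiring with the current component classes, assuming a run_tts(text) callable that returns a (sample_rate, waveform) pair as above:

import gradio as gr

def run_tts(text: str):
    # placeholder: a real implementation would run the glow-TTS and HiFi-GAN models
    raise NotImplementedError

iface = gr.Interface(
    fn=run_tts,
    inputs=gr.Textbox(label="TTS", placeholder="Enter text here"),
    outputs=gr.Audio(type="numpy"),
)
iface.launch(share=True)
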
diff --git a/spaces/Hexamind/iPADS/README.md b/spaces/Hexamind/iPADS/README.md
deleted file mode 100644
index 99e60304f468bc82d8cc2d3d2063df903450da00..0000000000000000000000000000000000000000
--- a/spaces/Hexamind/iPADS/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: IPADS
-emoji: 👁
-colorFrom: yellow
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: bsd-2-clause
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/HighCWu/GFPGAN-1.3/tests/test_gfpgan_arch.py b/spaces/HighCWu/GFPGAN-1.3/tests/test_gfpgan_arch.py
deleted file mode 100644
index cef14a435aa824a1b7c4baaf2d1fe0a2f6cc4441..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/GFPGAN-1.3/tests/test_gfpgan_arch.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import torch
-
-from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1, StyleGAN2GeneratorSFT
-from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean, StyleGAN2GeneratorCSFT
-
-
-def test_stylegan2generatorsft():
- """Test arch: StyleGAN2GeneratorSFT."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = StyleGAN2GeneratorSFT(
- out_size=32,
- num_style_feat=512,
- num_mlp=8,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- lr_mlp=0.01,
- narrow=1,
- sft_half=False).cuda().eval()
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
- output = net([style], conditions)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with return_latents ----------------------- #
- output = net([style], conditions, return_latents=True)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 1
- # check latent
- assert output[1][0].shape == (8, 512)
-
- # -------------------- with randomize_noise = False ----------------------- #
- output = net([style], conditions, randomize_noise=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with truncation = 0.5 and mixing----------------------- #
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
-
-def test_gfpganv1():
- """Test arch: GFPGANv1."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = GFPGANv1(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- lr_mlp=0.01,
- input_is_latent=False,
- different_w=False,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
- # -------------------- with different_w = True ----------------------- #
- net = GFPGANv1(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- lr_mlp=0.01,
- input_is_latent=False,
- different_w=True,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
-
-def test_facialcomponentdiscriminator():
- """Test arch: FacialComponentDiscriminator."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = FacialComponentDiscriminator().cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert len(output) == 2
- assert output[0].shape == (1, 1, 8, 8)
- assert output[1] is None
-
- # -------------------- return intermediate features ----------------------- #
- output = net(img, return_feats=True)
- assert len(output) == 2
- assert output[0].shape == (1, 1, 8, 8)
- assert len(output[1]) == 2
- assert output[1][0].shape == (1, 128, 16, 16)
- assert output[1][1].shape == (1, 256, 8, 8)
-
-
-def test_stylegan2generatorcsft():
- """Test arch: StyleGAN2GeneratorCSFT."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = StyleGAN2GeneratorCSFT(
- out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=1, sft_half=False).cuda().eval()
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
- output = net([style], conditions)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with return_latents ----------------------- #
- output = net([style], conditions, return_latents=True)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 1
- # check latent
- assert output[1][0].shape == (8, 512)
-
- # -------------------- with randomize_noise = False ----------------------- #
- output = net([style], conditions, randomize_noise=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with truncation = 0.5 and mixing----------------------- #
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
-
-def test_gfpganv1clean():
- """Test arch: GFPGANv1Clean."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = GFPGANv1Clean(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- input_is_latent=False,
- different_w=False,
- narrow=1,
- sft_half=True).cuda().eval()
-
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
- # -------------------- with different_w = True ----------------------- #
- net = GFPGANv1Clean(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- input_is_latent=False,
- different_w=True,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
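
The GFPGAN generator tests above repeat the different_w=False and different_w=True cases almost verbatim and silently pass when no GPU is present. A hedged sketch of how the same shape checks could be written with pytest parametrization and an explicit skip (shapes and constructor arguments taken from the tests above):

import pytest
import torch
from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean

@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
@pytest.mark.parametrize("different_w", [False, True])
def test_gfpganv1clean_output_shapes(different_w):
    net = GFPGANv1Clean(
        out_size=32, num_style_feat=512, channel_multiplier=1,
        decoder_load_path=None, fix_decoder=True, num_mlp=8,
        input_is_latent=False, different_w=different_w, narrow=1,
        sft_half=True).cuda().eval()
    img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
    out, rgbs = net(img)
    assert out.shape == (1, 3, 32, 32)
    # intermediate RGB outputs for the auxiliary loss, at 8x8, 16x16 and 32x32
    assert [tuple(t.shape) for t in rgbs] == [(1, 3, 8, 8), (1, 3, 16, 16), (1, 3, 32, 32)]
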
diff --git a/spaces/Hila/RobustViT/SegmentationTest/utils/__init__.py b/spaces/Hila/RobustViT/SegmentationTest/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/HuggingFaceH4/human_eval_llm_leaderboard/app.py b/spaces/HuggingFaceH4/human_eval_llm_leaderboard/app.py
deleted file mode 100644
index b513336622a63da9f7e81eecdb6c1590639261d5..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceH4/human_eval_llm_leaderboard/app.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import json
-import os
-from datetime import datetime, timezone
-
-
-import gradio as gr
-import numpy as np
-import pandas as pd
-from apscheduler.schedulers.background import BackgroundScheduler
-from huggingface_hub import HfApi
-
-from src.assets.text_content import *
-from src.elo_leaderboard.load_results import get_elo_plots, get_elo_results_dicts
-from src.assets.css_html_js import custom_css, get_window_url_params # left in case you need them
-from src.utils_display import EloEvalColumn, fields, styled_error, styled_warning, styled_message
-from src.init import load_all_info_from_hub
-
-# clone / pull the lmeh eval data
-H4_TOKEN = os.environ.get("H4_TOKEN", None)
-HUMAN_EVAL_REPO = "HuggingFaceH4/scale-human-eval"
-GPT_4_EVAL_REPO = "HuggingFaceH4/open_llm_leaderboard_oai_evals"
-IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
-ADD_PLOTS = False
-
-EVAL_REQUESTS_PATH = "auto_evals/eval_requests"
-
-api = HfApi()
-
-
-def restart_space():
- api.restart_space(
- repo_id="HuggingFaceH4/open_llm_leaderboard", token=H4_TOKEN
- )
-
-human_eval_repo, gpt_4_eval_repo = load_all_info_from_hub(HUMAN_EVAL_REPO, GPT_4_EVAL_REPO)
-
-ELO_COLS = [c.name for c in fields(EloEvalColumn)]
-ELO_TYPES = [c.type for c in fields(EloEvalColumn)]
-ELO_SORT_COL = EloEvalColumn.gpt4.name
-
-
-def has_no_nan_values(df, columns):
- return df[columns].notna().all(axis=1)
-
-
-def has_nan_values(df, columns):
- return df[columns].isna().any(axis=1)
-
-
-def get_elo_leaderboard(df_instruct, df_code_instruct, tie_allowed=False):
- if human_eval_repo:
- print("Pulling human_eval_repo changes")
- human_eval_repo.git_pull()
-
- all_data = get_elo_results_dicts(df_instruct, df_code_instruct, tie_allowed)
- dataframe = pd.DataFrame.from_records(all_data)
- dataframe = dataframe.sort_values(by=ELO_SORT_COL, ascending=False)
- dataframe = dataframe[ELO_COLS]
- return dataframe
-
-
-def get_elo_elements():
- df_instruct = pd.read_json("human_evals/without_code.json")
- df_code_instruct = pd.read_json("human_evals/with_code.json")
-
- elo_leaderboard = get_elo_leaderboard(
- df_instruct, df_code_instruct, tie_allowed=False
- )
- elo_leaderboard_with_tie_allowed = get_elo_leaderboard(
- df_instruct, df_code_instruct, tie_allowed=True
- )
- plot_1, plot_2, plot_3, plot_4 = get_elo_plots(
- df_instruct, df_code_instruct, tie_allowed=False
- )
-
- return (
- elo_leaderboard,
- elo_leaderboard_with_tie_allowed,
- plot_1,
- plot_2,
- plot_3,
- plot_4,
- )
-
-(
- elo_leaderboard,
- elo_leaderboard_with_tie_allowed,
- plot_1,
- plot_2,
- plot_3,
- plot_4,
-) = get_elo_elements()
-
-
-demo = gr.Blocks(css=custom_css)
-with demo:
- gr.HTML(TITLE)
- with gr.Row():
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
- with gr.Column():
- with gr.Row():
- with gr.Column(scale=2):
- gr.Markdown(HUMAN_GPT_EVAL_TEXT, elem_classes="markdown-text")
- with gr.Column(scale=1):
- gr.Image(
- "src/assets/scale-hf-logo.png", elem_id="scale-logo", show_label=False
- )
- gr.Markdown("## No tie allowed")
- elo_leaderboard_table = gr.components.Dataframe(
- value=elo_leaderboard,
- headers=ELO_COLS,
- datatype=ELO_TYPES,
- max_rows=5,
- )
-
- gr.Markdown("## Tie allowed*")
- elo_leaderboard_table_with_tie_allowed = gr.components.Dataframe(
- value=elo_leaderboard_with_tie_allowed,
- headers=ELO_COLS,
- datatype=ELO_TYPES,
- max_rows=5,
- )
-
- gr.Markdown(
- "\* Results when the scores of 4 and 5 were treated as ties.",
- elem_classes="markdown-text",
- )
-
- gr.Markdown(
- "Let us know in [this discussion](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/65) which models we should add!",
- elem_id="models-to-add-text",
- )
-
- if ADD_PLOTS:
- with gr.Box():
- visualization_title = gr.HTML(VISUALIZATION_TITLE)
- with gr.Row():
- with gr.Column():
- gr.Markdown(f"#### Figure 1: {PLOT_1_TITLE}")
- plot_1 = gr.Plot(plot_1, show_label=False)
- with gr.Column():
- gr.Markdown(f"#### Figure 2: {PLOT_2_TITLE}")
- plot_2 = gr.Plot(plot_2, show_label=False)
- with gr.Row():
- with gr.Column():
- gr.Markdown(f"#### Figure 3: {PLOT_3_TITLE}")
- plot_3 = gr.Plot(plot_3, show_label=False)
- with gr.Column():
- gr.Markdown(f"#### Figure 4: {PLOT_4_TITLE}")
- plot_4 = gr.Plot(plot_4, show_label=False)
-
- with gr.Row():
- with gr.Column():
- with gr.Accordion("📙 Citation", open=False):
- citation_button = gr.Textbox(
- value=CITATION_BUTTON_TEXT,
- label=CITATION_BUTTON_LABEL,
- elem_id="citation-button",
- ).style(show_copy_button=True)
- with gr.Column():
- with gr.Accordion("✨ CHANGELOG", open=False):
- changelog = gr.Markdown(CHANGELOG_TEXT, elem_id="changelog-text")
-
-
-
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=3600)
-scheduler.start()
-demo.queue(concurrency_count=40).launch()
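
The has_no_nan_values and has_nan_values helpers in the deleted app return per-row boolean masks; they are the usual way to split a results table into complete and incomplete entries. A small illustration with made-up data (column names here are hypothetical, not from the app):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "model": ["a", "b", "c"],
    "gpt4_score": [7.1, np.nan, 6.4],
    "human_score": [6.8, 6.0, np.nan],
})

score_cols = ["gpt4_score", "human_score"]
complete = df[df[score_cols].notna().all(axis=1)]    # only model "a"
incomplete = df[df[score_cols].isna().any(axis=1)]   # models "b" and "c"
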
diff --git a/spaces/Ibtehaj10/cheating-detection/person_tracking.py b/spaces/Ibtehaj10/cheating-detection/person_tracking.py
deleted file mode 100644
index 516088a3f8b0b4567dbee7303047d6ce65ac066c..0000000000000000000000000000000000000000
--- a/spaces/Ibtehaj10/cheating-detection/person_tracking.py
+++ /dev/null
@@ -1,542 +0,0 @@
-import cv2
-import datetime
-import imutils
-import numpy as np
-from centroidtracker import CentroidTracker
-import pandas as pd
-import torch
-import streamlit as st
-import mediapipe as mp
-import cv2 as cv
-import numpy as np
-import tempfile
-import time
-from PIL import Image
-import pandas as pd
-import torch
-import base64
-import streamlit.components.v1 as components
-import csv
-import pickle
-from pathlib import Path
-import streamlit_authenticator as stauth
-import os
-import csv
-# x-x-x-x-x-x-x-x-x-x-x-x-x-x LOGIN FORM x-x-x-x-x-x-x-x-x
-
-
-import streamlit as st
-import pandas as pd
-import hashlib
-import sqlite3
-#
-
-import pickle
-from pathlib import Path
-import streamlit_authenticator as stauth
-# print("Done !!!")
-
-data = ["student Count",'Date','Id','Mobile','Watch']
-with open('final.csv', 'w') as file:
- writer = csv.writer(file)
- writer.writerow(data)
-
-
-l1 = []
-l2 = []
-if st.button('signup'):
-
-
- usernames = st.text_input('Username')
- pwd = st.text_input('Password')
- l1.append(usernames)
- l2.append(pwd)
-
- names = ["dmin", "ser"]
- if st.button("signupsss"):
- username =l1
-
- password =l2
-
- hashed_passwords =stauth.Hasher(password).generate()
-
- file_path = Path(__file__).parent / "hashed_pw.pkl"
-
- with file_path.open("wb") as file:
- pickle.dump(hashed_passwords, file)
-
-
-elif st.button('Logins'):
- names = ['dmin', 'ser']
-
- username =l1
-
- file_path = Path(__file__).parent / 'hashed_pw.pkl'
-
- with file_path.open('rb') as file:
- hashed_passwords = pickle.load(file)
-
- authenticator = stauth.Authenticate(names,username,hashed_passwords,'Cheating Detection','abcdefg',cookie_expiry_days=180)
-
- name,authentication_status,username= authenticator.login('Login','main')
-
-
- if authentication_status == False:
- st.error('Username/Password is incorrect')
-
- if authentication_status == None:
- st.error('Please enter a username and password')
-
- if authentication_status:
- date_time = time.strftime("%b %d %Y %-I:%M %p")
- date = date_time.split()
- dates = date[0:3]
- times = date[3:5]
- # x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-xAPPLICACTION -x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
-
- def non_max_suppression_fast(boxes, overlapThresh):
- try:
- if len(boxes) == 0:
- return []
-
- if boxes.dtype.kind == "i":
- boxes = boxes.astype("float")
-
- pick = []
-
- x1 = boxes[:, 0]
- y1 = boxes[:, 1]
- x2 = boxes[:, 2]
- y2 = boxes[:, 3]
-
- area = (x2 - x1 + 1) * (y2 - y1 + 1)
- idxs = np.argsort(y2)
-
- while len(idxs) > 0:
- last = len(idxs) - 1
- i = idxs[last]
- pick.append(i)
-
- xx1 = np.maximum(x1[i], x1[idxs[:last]])
- yy1 = np.maximum(y1[i], y1[idxs[:last]])
- xx2 = np.minimum(x2[i], x2[idxs[:last]])
- yy2 = np.minimum(y2[i], y2[idxs[:last]])
-
- w = np.maximum(0, xx2 - xx1 + 1)
- h = np.maximum(0, yy2 - yy1 + 1)
-
- overlap = (w * h) / area[idxs[:last]]
-
- idxs = np.delete(idxs, np.concatenate(([last],
- np.where(overlap > overlapThresh)[0])))
-
- return boxes[pick].astype("int")
- except Exception as e:
- print("Exception occurred in non_max_suppression : {}".format(e))
-
-
- protopath = "MobileNetSSD_deploy.prototxt"
- modelpath = "MobileNetSSD_deploy.caffemodel"
- detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
- # Only enable it if you are using OpenVino environment
- # detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
- # detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
-
-
- CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
- "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
- "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
- "sofa", "train", "tvmonitor"]
-
- tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
-
- st.markdown(
- """
-
- """,
- unsafe_allow_html=True,
- )
- hide_streamlit_style = """
-
- """
- st.markdown(hide_streamlit_style, unsafe_allow_html=True)
-
-
- # Resize Images to fit Container
- @st.cache()
- # Get Image Dimensions
- def image_resize(image, width=None, height=None, inter=cv.INTER_AREA):
- dim = None
- (h,w) = image.shape[:2]
-
- if width is None and height is None:
- return image
-
- if width is None:
-                r = height/float(h)
- dim = (int(w*r),height)
-
- else:
- r = width/float(w)
- dim = width, int(h*r)
-
- # Resize image
- resized = cv.resize(image,dim,interpolation=inter)
-
- return resized
-
- # About Page
- authenticator.logout('Logout')
- app_mode = st.sidebar.selectbox(
- 'App Mode',
- ['About','Application']
- )
- if app_mode == 'About':
- st.title('About Product And Team')
- st.markdown('''
- Imran Bhai Project
- ''')
- st.markdown(
- """
-
- """,
- unsafe_allow_html=True,
- )
-
-
-
-
- elif app_mode == 'Application':
-
- st.set_option('deprecation.showfileUploaderEncoding', False)
-
- use_webcam = st.button('Use Webcam')
- # record = st.sidebar.checkbox("Record Video")
-
- # if record:
- # st.checkbox('Recording', True)
-
- # drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
-
- # st.sidebar.markdown('---')
-
- # ## Add Sidebar and Window style
- # st.markdown(
- # """
- #
- # """,
- # unsafe_allow_html=True,
- # )
-
- # max_faces = st.sidebar.number_input('Maximum Number of Faces', value=5, min_value=1)
- # st.sidebar.markdown('---')
- # detection_confidence = st.sidebar.slider('Min Detection Confidence', min_value=0.0,max_value=1.0,value=0.5)
- # tracking_confidence = st.sidebar.slider('Min Tracking Confidence', min_value=0.0,max_value=1.0,value=0.5)
- # st.sidebar.markdown('---')
-
- ## Get Video
- stframe = st.empty()
- video_file_buffer = st.file_uploader("Upload a Video", type=['mp4', 'mov', 'avi', 'asf', 'm4v'])
- temp_file = tempfile.NamedTemporaryFile(delete=False)
-
-
- if not video_file_buffer:
- if use_webcam:
- video = cv.VideoCapture(0)
- else:
- try:
- video = cv.VideoCapture(1)
- temp_file.name = video
- except:
- pass
- else:
- temp_file.write(video_file_buffer.read())
- video = cv.VideoCapture(temp_file.name)
-
- width = int(video.get(cv.CAP_PROP_FRAME_WIDTH))
- height = int(video.get(cv.CAP_PROP_FRAME_HEIGHT))
- fps_input = int(video.get(cv.CAP_PROP_FPS))
-
- ## Recording
- codec = cv.VideoWriter_fourcc('a','v','c','1')
- out = cv.VideoWriter('output1.mp4', codec, fps_input, (width,height))
-
- st.sidebar.text('Input Video')
- # st.sidebar.video(temp_file.name)
-
- fps = 0
- i = 0
-
- drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
-
- kpil, kpil2, kpil3,kpil4,kpil5, kpil6 = st.columns(6)
-
- with kpil:
- st.markdown('**Frame Rate**')
- kpil_text = st.markdown('0')
-
- with kpil2:
- st.markdown('**detection ID**')
- kpil2_text = st.markdown('0')
-
- with kpil3:
- st.markdown('**Mobile**')
- kpil3_text = st.markdown('0')
- with kpil4:
- st.markdown('**Watch**')
- kpil4_text = st.markdown('0')
- with kpil5:
- st.markdown('**Count**')
- kpil5_text = st.markdown('0')
- with kpil6:
- st.markdown('**Img Res**')
- kpil6_text = st.markdown('0')
-
-
-
- st.markdown('', unsafe_allow_html=True)
- # try:
- def main():
- db = {}
-
- # cap = cv2.VideoCapture('//home//anas//PersonTracking//WebUI//movement.mp4')
- path='/usr/local/lib/python3.10/dist-packages/yolo0vs5/yolov5s-int8.tflite'
- #count=0
- custom = 'yolov5s'
-
- model = torch.hub.load('/usr/local/lib/python3.10/dist-packages/yolovs5', custom, path,source='local',force_reload=True)
-
- b=model.names[0] = 'person'
- mobile = model.names[67] = 'cell phone'
- watch = model.names[75] = 'clock'
-
- fps_start_time = datetime.datetime.now()
- fps = 0
- size=416
-
- count=0
- counter=0
-
-
- color=(0,0,255)
-
- cy1=250
- offset=6
-
-
- pt1 = (120, 100)
- pt2 = (980, 1150)
- color = (0, 255, 0)
-
- pt3 = (283, 103)
- pt4 = (1500, 1150)
-
- cy2 = 500
- color = (0, 255, 0)
- total_frames = 0
- prevTime = 0
- cur_frame = 0
- count=0
- counter=0
- fps_start_time = datetime.datetime.now()
- fps = 0
- total_frames = 0
- lpc_count = 0
- opc_count = 0
- object_id_list = []
- # success = True
- if st.button("Detect"):
- try:
- while video.isOpened():
-
- ret, frame = video.read()
- frame = imutils.resize(frame, width=600)
- total_frames = total_frames + 1
-
- (H, W) = frame.shape[:2]
-
- blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
-
- detector.setInput(blob)
- person_detections = detector.forward()
- rects = []
- for i in np.arange(0, person_detections.shape[2]):
- confidence = person_detections[0, 0, i, 2]
- if confidence > 0.5:
- idx = int(person_detections[0, 0, i, 1])
-
- if CLASSES[idx] != "person":
- continue
-
- person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
- (startX, startY, endX, endY) = person_box.astype("int")
- rects.append(person_box)
-
- boundingboxes = np.array(rects)
- boundingboxes = boundingboxes.astype(int)
- rects = non_max_suppression_fast(boundingboxes, 0.3)
-
- objects = tracker.update(rects)
- for (objectId, bbox) in objects.items():
- x1, y1, x2, y2 = bbox
- x1 = int(x1)
- y1 = int(y1)
- x2 = int(x2)
- y2 = int(y2)
-
- cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
- text = "ID: {}".format(objectId)
- # print(text)
- cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
- if objectId not in object_id_list:
- object_id_list.append(objectId)
- fps_end_time = datetime.datetime.now()
- time_diff = fps_end_time - fps_start_time
- if time_diff.seconds == 0:
- fps = 0.0
- else:
- fps = (total_frames / time_diff.seconds)
-
- fps_text = "FPS: {:.2f}".format(fps)
-
- cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
- lpc_count = len(objects)
- opc_count = len(object_id_list)
-
- lpc_txt = "LPC: {}".format(lpc_count)
- opc_txt = "OPC: {}".format(opc_count)
-
- count += 1
- if count % 4 != 0:
- continue
- # frame=cv.resize(frame, (600,500))
- # cv2.line(frame, pt1, pt2,color,2)
- # cv2.line(frame, pt3, pt4,color,2)
- results = model(frame,size)
- components = results.pandas().xyxy[0]
- for index, row in results.pandas().xyxy[0].iterrows():
- x1 = int(row['xmin'])
- y1 = int(row['ymin'])
- x2 = int(row['xmax'])
- y2 = int(row['ymax'])
- confidence = (row['confidence'])
- obj = (row['class'])
-
-
- # min':x1,'ymin':y1,'xmax':x2,'ymax':y2,'confidence':confidence,'Object':obj}
- # if lpc_txt is not None:
- # try:
- # db["student Count"] = [lpc_txt]
- # except:
- # db["student Count"] = ['N/A']
- if obj == 0:
- cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
- rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2)
- rectcenter = int(rectx1),int(recty1)
- cx = rectcenter[0]
- cy = rectcenter[1]
- cv2.circle(frame,(cx,cy),3,(0,255,0),-1)
- cv2.putText(frame,str(b), (x1,y1), cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),2)
-
- db["student Count"] = [lpc_txt]
- db['Date'] = [date_time]
- db['id'] = ['N/A']
- db['Mobile']=['N/A']
- db['Watch'] = ['N/A']
- if cy<(cy1+offset) and cy>(cy1-offset):
- DB = []
- counter+=1
- DB.append(counter)
-
- ff = DB[-1]
- fx = str(ff)
- # cv2.line(frame, pt1, pt2,(0, 0, 255),2)
- # if cy<(cy2+offset) and cy>(cy2-offset):
-
- # cv2.line(frame, pt3, pt4,(0, 0, 255),2)
- font = cv2.FONT_HERSHEY_TRIPLEX
- cv2.putText(frame,fx,(50, 50),font, 1,(0, 0, 255),2,cv2.LINE_4)
- cv2.putText(frame,"Movement",(70, 70),font, 1,(0, 0, 255),2,cv2.LINE_4)
- kpil2_text.write(f"
- All music is generated by Mubert API – www.mubert.com
-
-
- """
- )
- with gr.Group():
- with gr.Box():
- email = gr.Textbox(label="email")
- prompt = gr.Textbox(label="prompt")
- duration = gr.Slider(label="duration (seconds)", value=30)
- is_loop = gr.Checkbox(label="Generate loop")
- out = gr.Audio()
- result_msg = gr.Text(label="Result message")
- tags = gr.Text(label="Tags")
- btn = gr.Button("Submit").style(full_width=True)
-
- btn.click(fn=generate_track_by_prompt, inputs=[email, prompt, duration, is_loop], outputs=[out, result_msg, tags])
- gr.HTML('''
-
- ''')
-
-block.launch()
\ No newline at end of file
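
The non_max_suppression_fast helper near the top of the deleted person_tracking.py keeps one box per cluster of overlapping detections: boxes are processed in order of their bottom edge, and any remaining box whose overlap with the picked box exceeds the threshold is discarded. A small worked example, assuming that function is in scope and using the same (x1, y1, x2, y2) box format:

import numpy as np

boxes = np.array([
    [10, 10, 110, 210],    # person A
    [12, 12, 112, 212],    # near-duplicate detection of person A
    [300, 40, 380, 200],   # person B, far away
])
kept = non_max_suppression_fast(boxes, overlapThresh=0.65)
# the two overlapping boxes collapse into one, so two boxes survive
assert len(kept) == 2
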
diff --git a/spaces/akhaliq/lama/saicinpainting/training/visualizers/colors.py b/spaces/akhaliq/lama/saicinpainting/training/visualizers/colors.py
deleted file mode 100644
index 9e9e39182c58cb06a1c5e97a7e6c497cc3388ebe..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/lama/saicinpainting/training/visualizers/colors.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import random
-import colorsys
-
-import numpy as np
-import matplotlib
-matplotlib.use('agg')
-import matplotlib.pyplot as plt
-from matplotlib.colors import LinearSegmentedColormap
-
-
-def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False):
- # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
- """
- Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
- :param nlabels: Number of labels (size of colormap)
- :param type: 'bright' for strong colors, 'soft' for pastel colors
- :param first_color_black: Option to use first color as black, True or False
- :param last_color_black: Option to use last color as black, True or False
- :param verbose: Prints the number of labels and shows the colormap. True or False
- :return: colormap for matplotlib
- """
- if type not in ('bright', 'soft'):
- print ('Please choose "bright" or "soft" for type')
- return
-
- if verbose:
- print('Number of labels: ' + str(nlabels))
-
- # Generate color map for bright colors, based on hsv
- if type == 'bright':
- randHSVcolors = [(np.random.uniform(low=0.0, high=1),
- np.random.uniform(low=0.2, high=1),
- np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]
-
- # Convert HSV list to RGB
- randRGBcolors = []
- for HSVcolor in randHSVcolors:
- randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))
-
- if first_color_black:
- randRGBcolors[0] = [0, 0, 0]
-
- if last_color_black:
- randRGBcolors[-1] = [0, 0, 0]
-
- random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
-
- # Generate soft pastel colors, by limiting the RGB spectrum
- if type == 'soft':
- low = 0.6
- high = 0.95
- randRGBcolors = [(np.random.uniform(low=low, high=high),
- np.random.uniform(low=low, high=high),
- np.random.uniform(low=low, high=high)) for i in range(nlabels)]
-
- if first_color_black:
- randRGBcolors[0] = [0, 0, 0]
-
- if last_color_black:
- randRGBcolors[-1] = [0, 0, 0]
- random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
-
- # Display colorbar
- if verbose:
- from matplotlib import colors, colorbar
- from matplotlib import pyplot as plt
- fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))
-
- bounds = np.linspace(0, nlabels, nlabels + 1)
- norm = colors.BoundaryNorm(bounds, nlabels)
-
- cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
- boundaries=bounds, format='%1i', orientation=u'horizontal')
-
- return randRGBcolors, random_colormap
-
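
generate_colors above returns both the list of RGB tuples and a matplotlib colormap, with optional black endpoints so that background or ignore labels render as black. A short usage sketch for colouring an integer label map, assuming the function is imported from this module:

import numpy as np
import matplotlib.pyplot as plt

n_labels = 10
rgb_list, cmap = generate_colors(n_labels, type='bright', first_color_black=True)

labels = np.random.randint(0, n_labels, size=(64, 64))  # stand-in segmentation map
plt.imshow(labels, cmap=cmap, vmin=0, vmax=n_labels - 1)
plt.savefig('labels.png')
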
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/distlib/util.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/distlib/util.py
deleted file mode 100644
index dd01849d997e5ae9dc9809295e29ceb871b14216..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/distlib/util.py
+++ /dev/null
@@ -1,1932 +0,0 @@
-#
-# Copyright (C) 2012-2021 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import codecs
-from collections import deque
-import contextlib
-import csv
-from glob import iglob as std_iglob
-import io
-import json
-import logging
-import os
-import py_compile
-import re
-import socket
-try:
- import ssl
-except ImportError: # pragma: no cover
- ssl = None
-import subprocess
-import sys
-import tarfile
-import tempfile
-import textwrap
-
-try:
- import threading
-except ImportError: # pragma: no cover
- import dummy_threading as threading
-import time
-
-from . import DistlibException
-from .compat import (string_types, text_type, shutil, raw_input, StringIO,
- cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
- splittype, HTTPHandler, BaseConfigurator, valid_ident,
- Container, configparser, URLError, ZipFile, fsdecode,
- unquote, urlparse)
-
-logger = logging.getLogger(__name__)
-
-#
-# Requirement parsing code as per PEP 508
-#
-
-IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
-VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
-COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
-MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
-OR = re.compile(r'^or\b\s*')
-AND = re.compile(r'^and\b\s*')
-NON_SPACE = re.compile(r'(\S+)\s*')
-STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
-
-
-def parse_marker(marker_string):
- """
- Parse a marker string and return a dictionary containing a marker expression.
-
- The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
- the expression grammar, or strings. A string contained in quotes is to be
- interpreted as a literal string, and a string not contained in quotes is a
- variable (such as os_name).
- """
- def marker_var(remaining):
- # either identifier, or literal string
- m = IDENTIFIER.match(remaining)
- if m:
- result = m.groups()[0]
- remaining = remaining[m.end():]
- elif not remaining:
- raise SyntaxError('unexpected end of input')
- else:
- q = remaining[0]
- if q not in '\'"':
- raise SyntaxError('invalid expression: %s' % remaining)
- oq = '\'"'.replace(q, '')
- remaining = remaining[1:]
- parts = [q]
- while remaining:
- # either a string chunk, or oq, or q to terminate
- if remaining[0] == q:
- break
- elif remaining[0] == oq:
- parts.append(oq)
- remaining = remaining[1:]
- else:
- m = STRING_CHUNK.match(remaining)
- if not m:
- raise SyntaxError('error in string literal: %s' % remaining)
- parts.append(m.groups()[0])
- remaining = remaining[m.end():]
- else:
- s = ''.join(parts)
- raise SyntaxError('unterminated string: %s' % s)
- parts.append(q)
- result = ''.join(parts)
- remaining = remaining[1:].lstrip() # skip past closing quote
- return result, remaining
-
- def marker_expr(remaining):
- if remaining and remaining[0] == '(':
- result, remaining = marker(remaining[1:].lstrip())
- if remaining[0] != ')':
- raise SyntaxError('unterminated parenthesis: %s' % remaining)
- remaining = remaining[1:].lstrip()
- else:
- lhs, remaining = marker_var(remaining)
- while remaining:
- m = MARKER_OP.match(remaining)
- if not m:
- break
- op = m.groups()[0]
- remaining = remaining[m.end():]
- rhs, remaining = marker_var(remaining)
- lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
- result = lhs
- return result, remaining
-
- def marker_and(remaining):
- lhs, remaining = marker_expr(remaining)
- while remaining:
- m = AND.match(remaining)
- if not m:
- break
- remaining = remaining[m.end():]
- rhs, remaining = marker_expr(remaining)
- lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
- return lhs, remaining
-
- def marker(remaining):
- lhs, remaining = marker_and(remaining)
- while remaining:
- m = OR.match(remaining)
- if not m:
- break
- remaining = remaining[m.end():]
- rhs, remaining = marker_and(remaining)
- lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
- return lhs, remaining
-
- return marker(marker_string)
-
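
As a quick illustration of the structure parse_marker produces (a sketch traced from the code above, not taken from distlib's docs): it returns a (tree, remaining_text) pair, and quoted literals keep their quotes.

tree, rest = parse_marker('python_version >= "3.6" and os_name == "posix"')
# rest == ''
# tree == {
#     'op': 'and',
#     'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'},
#     'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'},
# }
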
-
-def parse_requirement(req):
- """
- Parse a requirement passed in as a string. Return a Container
- whose attributes contain the various parts of the requirement.
- """
- remaining = req.strip()
- if not remaining or remaining.startswith('#'):
- return None
- m = IDENTIFIER.match(remaining)
- if not m:
- raise SyntaxError('name expected: %s' % remaining)
- distname = m.groups()[0]
- remaining = remaining[m.end():]
- extras = mark_expr = versions = uri = None
- if remaining and remaining[0] == '[':
- i = remaining.find(']', 1)
- if i < 0:
- raise SyntaxError('unterminated extra: %s' % remaining)
- s = remaining[1:i]
- remaining = remaining[i + 1:].lstrip()
- extras = []
- while s:
- m = IDENTIFIER.match(s)
- if not m:
- raise SyntaxError('malformed extra: %s' % s)
- extras.append(m.groups()[0])
- s = s[m.end():]
- if not s:
- break
- if s[0] != ',':
- raise SyntaxError('comma expected in extras: %s' % s)
- s = s[1:].lstrip()
- if not extras:
- extras = None
- if remaining:
- if remaining[0] == '@':
- # it's a URI
- remaining = remaining[1:].lstrip()
- m = NON_SPACE.match(remaining)
- if not m:
- raise SyntaxError('invalid URI: %s' % remaining)
- uri = m.groups()[0]
- t = urlparse(uri)
- # there are issues with Python and URL parsing, so this test
- # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
- # always parse invalid URLs correctly - it should raise
- # exceptions for malformed URLs
- if not (t.scheme and t.netloc):
- raise SyntaxError('Invalid URL: %s' % uri)
- remaining = remaining[m.end():].lstrip()
- else:
-
- def get_versions(ver_remaining):
- """
- Return a list of operator, version tuples if any are
- specified, else None.
- """
- m = COMPARE_OP.match(ver_remaining)
- versions = None
- if m:
- versions = []
- while True:
- op = m.groups()[0]
- ver_remaining = ver_remaining[m.end():]
- m = VERSION_IDENTIFIER.match(ver_remaining)
- if not m:
- raise SyntaxError('invalid version: %s' % ver_remaining)
- v = m.groups()[0]
- versions.append((op, v))
- ver_remaining = ver_remaining[m.end():]
- if not ver_remaining or ver_remaining[0] != ',':
- break
- ver_remaining = ver_remaining[1:].lstrip()
- # Some packages have a trailing comma which would break things
- # See issue #148
- if not ver_remaining:
- break
- m = COMPARE_OP.match(ver_remaining)
- if not m:
- raise SyntaxError('invalid constraint: %s' % ver_remaining)
- if not versions:
- versions = None
- return versions, ver_remaining
-
- if remaining[0] != '(':
- versions, remaining = get_versions(remaining)
- else:
- i = remaining.find(')', 1)
- if i < 0:
- raise SyntaxError('unterminated parenthesis: %s' % remaining)
- s = remaining[1:i]
- remaining = remaining[i + 1:].lstrip()
- # As a special diversion from PEP 508, allow a version number
- # a.b.c in parentheses as a synonym for ~= a.b.c (because this
- # is allowed in earlier PEPs)
- if COMPARE_OP.match(s):
- versions, _ = get_versions(s)
- else:
- m = VERSION_IDENTIFIER.match(s)
- if not m:
- raise SyntaxError('invalid constraint: %s' % s)
- v = m.groups()[0]
- s = s[m.end():].lstrip()
- if s:
- raise SyntaxError('invalid constraint: %s' % s)
- versions = [('~=', v)]
-
- if remaining:
- if remaining[0] != ';':
- raise SyntaxError('invalid requirement: %s' % remaining)
- remaining = remaining[1:].lstrip()
-
- mark_expr, remaining = parse_marker(remaining)
-
- if remaining and remaining[0] != '#':
- raise SyntaxError('unexpected trailing data: %s' % remaining)
-
- if not versions:
- rs = distname
- else:
- rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
- return Container(name=distname, extras=extras, constraints=versions,
- marker=mark_expr, url=uri, requirement=rs)
-
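
And a sketch of what parse_requirement yields for a typical PEP 508 line, a Container whose attributes mirror the parsed parts (values traced from the code above, shown approximately):

r = parse_requirement('requests[security] >= 2.8.1, < 3.0; python_version < "3.9"')
# r.name        == 'requests'
# r.extras      == ['security']
# r.constraints == [('>=', '2.8.1'), ('<', '3.0')]
# r.marker      == {'op': '<', 'lhs': 'python_version', 'rhs': '"3.9"'}
# r.requirement == 'requests >= 2.8.1, < 3.0'
# r.url is None
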
-
-def get_resources_dests(resources_root, rules):
- """Find destinations for resources files"""
-
- def get_rel_path(root, path):
- # normalizes and returns a lstripped-/-separated path
- root = root.replace(os.path.sep, '/')
- path = path.replace(os.path.sep, '/')
- assert path.startswith(root)
- return path[len(root):].lstrip('/')
-
- destinations = {}
- for base, suffix, dest in rules:
- prefix = os.path.join(resources_root, base)
- for abs_base in iglob(prefix):
- abs_glob = os.path.join(abs_base, suffix)
- for abs_path in iglob(abs_glob):
- resource_file = get_rel_path(resources_root, abs_path)
- if dest is None: # remove the entry if it was here
- destinations.pop(resource_file, None)
- else:
- rel_path = get_rel_path(abs_base, abs_path)
- rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
- destinations[resource_file] = rel_dest + '/' + rel_path
- return destinations
-
-
-def in_venv():
- if hasattr(sys, 'real_prefix'):
- # virtualenv venvs
- result = True
- else:
- # PEP 405 venvs
- result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
- return result
-
-
-def get_executable():
-# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
-# changes to the stub launcher mean that sys.executable always points
-# to the stub on OS X
-# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
-# in os.environ):
-# result = os.environ['__PYVENV_LAUNCHER__']
-# else:
-# result = sys.executable
-# return result
- # Avoid normcasing: see issue #143
- # result = os.path.normcase(sys.executable)
- result = sys.executable
- if not isinstance(result, text_type):
- result = fsdecode(result)
- return result
-
-
-def proceed(prompt, allowed_chars, error_prompt=None, default=None):
- p = prompt
- while True:
- s = raw_input(p)
- p = prompt
- if not s and default:
- s = default
- if s:
- c = s[0].lower()
- if c in allowed_chars:
- break
- if error_prompt:
- p = '%c: %s\n%s' % (c, error_prompt, prompt)
- return c
-
-
-def extract_by_key(d, keys):
- if isinstance(keys, string_types):
- keys = keys.split()
- result = {}
- for key in keys:
- if key in d:
- result[key] = d[key]
- return result
-
-def read_exports(stream):
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getreader('utf-8')(stream)
- # Try to load as JSON, falling back on legacy format
- data = stream.read()
- stream = StringIO(data)
- try:
- jdata = json.load(stream)
- result = jdata['extensions']['python.exports']['exports']
- for group, entries in result.items():
- for k, v in entries.items():
- s = '%s = %s' % (k, v)
- entry = get_export_entry(s)
- assert entry is not None
- entries[k] = entry
- return result
- except Exception:
- stream.seek(0, 0)
-
- def read_stream(cp, stream):
- if hasattr(cp, 'read_file'):
- cp.read_file(stream)
- else:
- cp.readfp(stream)
-
- cp = configparser.ConfigParser()
- try:
- read_stream(cp, stream)
- except configparser.MissingSectionHeaderError:
- stream.close()
- data = textwrap.dedent(data)
- stream = StringIO(data)
- read_stream(cp, stream)
-
- result = {}
- for key in cp.sections():
- result[key] = entries = {}
- for name, value in cp.items(key):
- s = '%s = %s' % (name, value)
- entry = get_export_entry(s)
- assert entry is not None
- #entry.dist = self
- entries[name] = entry
- return result
-
-
-def write_exports(exports, stream):
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getwriter('utf-8')(stream)
- cp = configparser.ConfigParser()
- for k, v in exports.items():
- # TODO check k, v for valid values
- cp.add_section(k)
- for entry in v.values():
- if entry.suffix is None:
- s = entry.prefix
- else:
- s = '%s:%s' % (entry.prefix, entry.suffix)
- if entry.flags:
- s = '%s [%s]' % (s, ', '.join(entry.flags))
- cp.set(k, entry.name, s)
- cp.write(stream)
-
-
-@contextlib.contextmanager
-def tempdir():
- td = tempfile.mkdtemp()
- try:
- yield td
- finally:
- shutil.rmtree(td)
-
-@contextlib.contextmanager
-def chdir(d):
- cwd = os.getcwd()
- try:
- os.chdir(d)
- yield
- finally:
- os.chdir(cwd)
-
-
-@contextlib.contextmanager
-def socket_timeout(seconds=15):
- cto = socket.getdefaulttimeout()
- try:
- socket.setdefaulttimeout(seconds)
- yield
- finally:
- socket.setdefaulttimeout(cto)
-
-
-class cached_property(object):
- def __init__(self, func):
- self.func = func
- #for attr in ('__name__', '__module__', '__doc__'):
- # setattr(self, attr, getattr(func, attr, None))
-
- def __get__(self, obj, cls=None):
- if obj is None:
- return self
- value = self.func(obj)
- object.__setattr__(obj, self.func.__name__, value)
- #obj.__dict__[self.func.__name__] = value = self.func(obj)
- return value
-
-def convert_path(pathname):
- """Return 'pathname' as a name that will work on the native filesystem.
-
- The path is split on '/' and put back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError("path '%s' cannot be absolute" % pathname)
- if pathname[-1] == '/':
- raise ValueError("path '%s' cannot end with '/'" % pathname)
-
- paths = pathname.split('/')
- while os.curdir in paths:
- paths.remove(os.curdir)
- if not paths:
- return os.curdir
- return os.path.join(*paths)
-
-
-class FileOperator(object):
- def __init__(self, dry_run=False):
- self.dry_run = dry_run
- self.ensured = set()
- self._init_record()
-
- def _init_record(self):
- self.record = False
- self.files_written = set()
- self.dirs_created = set()
-
- def record_as_written(self, path):
- if self.record:
- self.files_written.add(path)
-
- def newer(self, source, target):
- """Tell if the target is newer than the source.
-
- Returns true if 'source' exists and is more recently modified than
- 'target', or if 'source' exists and 'target' doesn't.
-
- Returns false if both exist and 'target' is the same age or younger
- than 'source'. Raise PackagingFileError if 'source' does not exist.
-        than 'source'. Raise DistlibException if 'source' does not exist.
- Note that this test is not very accurate: files created in the same
- second will have the same "age".
- """
- if not os.path.exists(source):
- raise DistlibException("file '%r' does not exist" %
- os.path.abspath(source))
- if not os.path.exists(target):
- return True
-
- return os.stat(source).st_mtime > os.stat(target).st_mtime
-
- def copy_file(self, infile, outfile, check=True):
- """Copy a file respecting dry-run and force flags.
- """
- self.ensure_dir(os.path.dirname(outfile))
- logger.info('Copying %s to %s', infile, outfile)
- if not self.dry_run:
- msg = None
- if check:
- if os.path.islink(outfile):
- msg = '%s is a symlink' % outfile
- elif os.path.exists(outfile) and not os.path.isfile(outfile):
- msg = '%s is a non-regular file' % outfile
- if msg:
- raise ValueError(msg + ' which would be overwritten')
- shutil.copyfile(infile, outfile)
- self.record_as_written(outfile)
-
- def copy_stream(self, instream, outfile, encoding=None):
- assert not os.path.isdir(outfile)
- self.ensure_dir(os.path.dirname(outfile))
- logger.info('Copying stream %s to %s', instream, outfile)
- if not self.dry_run:
- if encoding is None:
- outstream = open(outfile, 'wb')
- else:
- outstream = codecs.open(outfile, 'w', encoding=encoding)
- try:
- shutil.copyfileobj(instream, outstream)
- finally:
- outstream.close()
- self.record_as_written(outfile)
-
- def write_binary_file(self, path, data):
- self.ensure_dir(os.path.dirname(path))
- if not self.dry_run:
- if os.path.exists(path):
- os.remove(path)
- with open(path, 'wb') as f:
- f.write(data)
- self.record_as_written(path)
-
- def write_text_file(self, path, data, encoding):
- self.write_binary_file(path, data.encode(encoding))
-
- def set_mode(self, bits, mask, files):
- if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
- # Set the executable bits (owner, group, and world) on
- # all the files specified.
- for f in files:
- if self.dry_run:
- logger.info("changing mode of %s", f)
- else:
- mode = (os.stat(f).st_mode | bits) & mask
- logger.info("changing mode of %s to %o", f, mode)
- os.chmod(f, mode)
-
- set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
-
- def ensure_dir(self, path):
- path = os.path.abspath(path)
- if path not in self.ensured and not os.path.exists(path):
- self.ensured.add(path)
- d, f = os.path.split(path)
- self.ensure_dir(d)
- logger.info('Creating %s' % path)
- if not self.dry_run:
- os.mkdir(path)
- if self.record:
- self.dirs_created.add(path)
-
- def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
- dpath = cache_from_source(path, not optimize)
- logger.info('Byte-compiling %s to %s', path, dpath)
- if not self.dry_run:
- if force or self.newer(path, dpath):
- if not prefix:
- diagpath = None
- else:
- assert path.startswith(prefix)
- diagpath = path[len(prefix):]
- compile_kwargs = {}
- if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
- compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
- py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
- self.record_as_written(dpath)
- return dpath
-
- def ensure_removed(self, path):
- if os.path.exists(path):
- if os.path.isdir(path) and not os.path.islink(path):
- logger.debug('Removing directory tree at %s', path)
- if not self.dry_run:
- shutil.rmtree(path)
- if self.record:
- if path in self.dirs_created:
- self.dirs_created.remove(path)
- else:
- if os.path.islink(path):
- s = 'link'
- else:
- s = 'file'
- logger.debug('Removing %s %s', s, path)
- if not self.dry_run:
- os.remove(path)
- if self.record:
- if path in self.files_written:
- self.files_written.remove(path)
-
- def is_writable(self, path):
- result = False
- while not result:
- if os.path.exists(path):
- result = os.access(path, os.W_OK)
- break
- parent = os.path.dirname(path)
- if parent == path:
- break
- path = parent
- return result
-
- def commit(self):
- """
- Commit recorded changes, turn off recording, return
- changes.
- """
- assert self.record
- result = self.files_written, self.dirs_created
- self._init_record()
- return result
-
- def rollback(self):
- if not self.dry_run:
- for f in list(self.files_written):
- if os.path.exists(f):
- os.remove(f)
- # dirs should all be empty now, except perhaps for
- # __pycache__ subdirs
- # reverse so that subdirs appear before their parents
- dirs = sorted(self.dirs_created, reverse=True)
- for d in dirs:
- flist = os.listdir(d)
- if flist:
- assert flist == ['__pycache__']
- sd = os.path.join(d, flist[0])
- os.rmdir(sd)
- os.rmdir(d) # should fail if non-empty
- self._init_record()
-
-def resolve(module_name, dotted_path):
- if module_name in sys.modules:
- mod = sys.modules[module_name]
- else:
- mod = __import__(module_name)
- if dotted_path is None:
- result = mod
- else:
- parts = dotted_path.split('.')
- result = getattr(mod, parts.pop(0))
- for p in parts:
- result = getattr(result, p)
- return result
-
-
-class ExportEntry(object):
- def __init__(self, name, prefix, suffix, flags):
- self.name = name
- self.prefix = prefix
- self.suffix = suffix
- self.flags = flags
-
- @cached_property
- def value(self):
- return resolve(self.prefix, self.suffix)
-
- def __repr__(self): # pragma: no cover
-        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
-                                                self.suffix, self.flags)
-
- def __eq__(self, other):
- if not isinstance(other, ExportEntry):
- result = False
- else:
- result = (self.name == other.name and
- self.prefix == other.prefix and
- self.suffix == other.suffix and
- self.flags == other.flags)
- return result
-
- __hash__ = object.__hash__
-
-
-ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
-                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
-                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
-                      ''', re.VERBOSE)
-
-def get_export_entry(specification):
- m = ENTRY_RE.search(specification)
- if not m:
- result = None
- if '[' in specification or ']' in specification:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- else:
- d = m.groupdict()
- name = d['name']
- path = d['callable']
- colons = path.count(':')
- if colons == 0:
- prefix, suffix = path, None
- else:
- if colons != 1:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- prefix, suffix = path.split(':')
- flags = d['flags']
- if flags is None:
- if '[' in specification or ']' in specification:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- flags = []
- else:
- flags = [f.strip() for f in flags.split(',')]
- result = ExportEntry(name, prefix, suffix, flags)
- return result
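A minimal usage sketch of `get_export_entry`; the specification string and names below are purely illustrative, and it assumes the module above is importable as `distlib.util`:

```python
from distlib.util import get_export_entry

# specifications take the form 'name = package.module:attr [flag1,flag2]'
entry = get_export_entry('my-script = mypkg.cli:main [gui,dev]')
print(entry.name)    # 'my-script'
print(entry.prefix)  # 'mypkg.cli'
print(entry.suffix)  # 'main'
print(entry.flags)   # ['gui', 'dev']
```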
-
-
-def get_cache_base(suffix=None):
- """
- Return the default base location for distlib caches. If the directory does
- not exist, it is created. Use the suffix provided for the base directory,
- and default to '.distlib' if it isn't provided.
-
- On Windows, if LOCALAPPDATA is defined in the environment, then it is
- assumed to be a directory, and will be the parent directory of the result.
- On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
- directory - using os.expanduser('~') - will be the parent directory of
- the result.
-
- The result is just the directory '.distlib' in the parent directory as
- determined above, or with the name specified with ``suffix``.
- """
- if suffix is None:
- suffix = '.distlib'
- if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
- result = os.path.expandvars('$localappdata')
- else:
- # Assume posix, or old Windows
- result = os.path.expanduser('~')
- # we use 'isdir' instead of 'exists', because we want to
- # fail if there's a file with that name
- if os.path.isdir(result):
- usable = os.access(result, os.W_OK)
- if not usable:
- logger.warning('Directory exists but is not writable: %s', result)
- else:
- try:
- os.makedirs(result)
- usable = True
- except OSError:
- logger.warning('Unable to create %s', result, exc_info=True)
- usable = False
- if not usable:
- result = tempfile.mkdtemp()
- logger.warning('Default location unusable, using %s', result)
- return os.path.join(result, suffix)
-
-
-def path_to_cache_dir(path):
- """
- Convert an absolute path to a directory name for use in a cache.
-
- The algorithm used is:
-
- #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
- #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
- #. ``'.cache'`` is appended.
- """
- d, p = os.path.splitdrive(os.path.abspath(path))
- if d:
- d = d.replace(':', '---')
- p = p.replace(os.sep, '--')
- return d + p + '.cache'
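A quick illustration of the conversion described in the docstring, on a hypothetical POSIX path:

```python
from distlib.util import path_to_cache_dir

# each '/' becomes '--' and '.cache' is appended
path_to_cache_dir('/usr/local/lib')   # -> '--usr--local--lib.cache'
```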
-
-
-def ensure_slash(s):
- if not s.endswith('/'):
- return s + '/'
- return s
-
-
-def parse_credentials(netloc):
- username = password = None
- if '@' in netloc:
- prefix, netloc = netloc.rsplit('@', 1)
- if ':' not in prefix:
- username = prefix
- else:
- username, password = prefix.split(':', 1)
- if username:
- username = unquote(username)
- if password:
- password = unquote(password)
- return username, password, netloc
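A small sketch of `parse_credentials` on a made-up netloc; note that percent-encoded characters in the credentials are unquoted:

```python
from distlib.util import parse_credentials

parse_credentials('user:s%40cret@pypi.example.com')
# -> ('user', 's@cret', 'pypi.example.com')
```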
-
-
-def get_process_umask():
- result = os.umask(0o22)
- os.umask(result)
- return result
-
-def is_string_sequence(seq):
- result = True
- i = None
- for i, s in enumerate(seq):
- if not isinstance(s, string_types):
- result = False
- break
- assert i is not None
- return result
-
-PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
- '([a-z0-9_.+-]+)', re.I)
-PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
-
-
-def split_filename(filename, project_name=None):
- """
- Extract name, version, python version from a filename (no extension)
-
- Return name, version, pyver or None
- """
- result = None
- pyver = None
- filename = unquote(filename).replace(' ', '-')
- m = PYTHON_VERSION.search(filename)
- if m:
- pyver = m.group(1)
- filename = filename[:m.start()]
- if project_name and len(filename) > len(project_name) + 1:
- m = re.match(re.escape(project_name) + r'\b', filename)
- if m:
- n = m.end()
- result = filename[:n], filename[n + 1:], pyver
- if result is None:
- m = PROJECT_NAME_AND_VERSION.match(filename)
- if m:
- result = m.group(1), m.group(3), pyver
- return result
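For example, with a hypothetical filename:

```python
from distlib.util import split_filename

split_filename('simplejson-2.6.1-py2.7')
# -> ('simplejson', '2.6.1', '2.7')
```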
-
-# Allow spaces in name because of legacy dists like "Twisted Core"
-NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
- r'\(\s*(?P<ver>[^\s)]+)\)$')
-
-def parse_name_and_version(p):
- """
- A utility method used to get name and version from a string.
-
- From e.g. a Provides-Dist value.
-
- :param p: A value in a form 'foo (1.0)'
- :return: The name and version as a tuple.
- """
- m = NAME_VERSION_RE.match(p)
- if not m:
- raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
- d = m.groupdict()
- return d['name'].strip().lower(), d['ver']
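For example, on a made-up Provides-Dist style value:

```python
from distlib.util import parse_name_and_version

parse_name_and_version('Twisted Core (13.2.0)')
# -> ('twisted core', '13.2.0')
```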
-
-def get_extras(requested, available):
- result = set()
- requested = set(requested or [])
- available = set(available or [])
- if '*' in requested:
- requested.remove('*')
- result |= available
- for r in requested:
- if r == '-':
- result.add(r)
- elif r.startswith('-'):
- unwanted = r[1:]
- if unwanted not in available:
- logger.warning('undeclared extra: %s' % unwanted)
- if unwanted in result:
- result.remove(unwanted)
- else:
- if r not in available:
- logger.warning('undeclared extra: %s' % r)
- result.add(r)
- return result
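A short sketch of how `get_extras` resolves wildcard and negated extras (the extra names are invented):

```python
from distlib.util import get_extras

# '*' selects every available extra; '-dev' then removes one of them
get_extras(['*', '-dev'], ['test', 'dev', 'docs'])
# -> {'test', 'docs'}
```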
-#
-# Extended metadata functionality
-#
-
-def _get_external_data(url):
- result = {}
- try:
- # urlopen might fail if it runs into redirections,
- # because of Python issue #13696. Fixed in locators
- # using a custom redirect handler.
- resp = urlopen(url)
- headers = resp.info()
- ct = headers.get('Content-Type')
- if not ct.startswith('application/json'):
- logger.debug('Unexpected response for JSON request: %s', ct)
- else:
- reader = codecs.getreader('utf-8')(resp)
- #data = reader.read().decode('utf-8')
- #result = json.loads(data)
- result = json.load(reader)
- except Exception as e:
- logger.exception('Failed to get external data for %s: %s', url, e)
- return result
-
-_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
-
-def get_project_data(name):
- url = '%s/%s/project.json' % (name[0].upper(), name)
- url = urljoin(_external_data_base_url, url)
- result = _get_external_data(url)
- return result
-
-def get_package_data(name, version):
- url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
- url = urljoin(_external_data_base_url, url)
- return _get_external_data(url)
-
-
-class Cache(object):
- """
- A class implementing a cache for resources that need to live in the file system
- e.g. shared libraries. This class was moved from resources to here because it
- could be used by other modules, e.g. the wheel module.
- """
-
- def __init__(self, base):
- """
- Initialise an instance.
-
- :param base: The base directory where the cache should be located.
- """
- # we use 'isdir' instead of 'exists', because we want to
- # fail if there's a file with that name
- if not os.path.isdir(base): # pragma: no cover
- os.makedirs(base)
- if (os.stat(base).st_mode & 0o77) != 0:
- logger.warning('Directory \'%s\' is not private', base)
- self.base = os.path.abspath(os.path.normpath(base))
-
- def prefix_to_dir(self, prefix):
- """
- Converts a resource prefix to a directory name in the cache.
- """
- return path_to_cache_dir(prefix)
-
- def clear(self):
- """
- Clear the cache.
- """
- not_removed = []
- for fn in os.listdir(self.base):
- fn = os.path.join(self.base, fn)
- try:
- if os.path.islink(fn) or os.path.isfile(fn):
- os.remove(fn)
- elif os.path.isdir(fn):
- shutil.rmtree(fn)
- except Exception:
- not_removed.append(fn)
- return not_removed
-
-
-class EventMixin(object):
- """
- A very simple publish/subscribe system.
- """
- def __init__(self):
- self._subscribers = {}
-
- def add(self, event, subscriber, append=True):
- """
- Add a subscriber for an event.
-
- :param event: The name of an event.
- :param subscriber: The subscriber to be added (and called when the
- event is published).
- :param append: Whether to append or prepend the subscriber to an
- existing subscriber list for the event.
- """
- subs = self._subscribers
- if event not in subs:
- subs[event] = deque([subscriber])
- else:
- sq = subs[event]
- if append:
- sq.append(subscriber)
- else:
- sq.appendleft(subscriber)
-
- def remove(self, event, subscriber):
- """
- Remove a subscriber for an event.
-
- :param event: The name of an event.
- :param subscriber: The subscriber to be removed.
- """
- subs = self._subscribers
- if event not in subs:
- raise ValueError('No subscribers: %r' % event)
- subs[event].remove(subscriber)
-
- def get_subscribers(self, event):
- """
- Return an iterator for the subscribers for an event.
- :param event: The event to return subscribers for.
- """
- return iter(self._subscribers.get(event, ()))
-
- def publish(self, event, *args, **kwargs):
- """
- Publish an event and return a list of values returned by its
- subscribers.
-
- :param event: The event to publish.
- :param args: The positional arguments to pass to the event's
- subscribers.
- :param kwargs: The keyword arguments to pass to the event's
- subscribers.
- """
- result = []
- for subscriber in self.get_subscribers(event):
- try:
- value = subscriber(event, *args, **kwargs)
- except Exception:
- logger.exception('Exception during event publication')
- value = None
- result.append(value)
- logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
- event, args, kwargs, result)
- return result
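A minimal publish/subscribe sketch using `EventMixin` (the class and event names are made up):

```python
from distlib.util import EventMixin

class Builder(EventMixin):
    pass

b = Builder()
b.add('built', lambda event, name: 'built %s' % name)
print(b.publish('built', name='wheel'))   # ['built wheel']
```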
-
-#
-# Simple sequencing
-#
-class Sequencer(object):
- def __init__(self):
- self._preds = {}
- self._succs = {}
- self._nodes = set() # nodes with no preds/succs
-
- def add_node(self, node):
- self._nodes.add(node)
-
- def remove_node(self, node, edges=False):
- if node in self._nodes:
- self._nodes.remove(node)
- if edges:
- for p in set(self._preds.get(node, ())):
- self.remove(p, node)
- for s in set(self._succs.get(node, ())):
- self.remove(node, s)
- # Remove empties
- for k, v in list(self._preds.items()):
- if not v:
- del self._preds[k]
- for k, v in list(self._succs.items()):
- if not v:
- del self._succs[k]
-
- def add(self, pred, succ):
- assert pred != succ
- self._preds.setdefault(succ, set()).add(pred)
- self._succs.setdefault(pred, set()).add(succ)
-
- def remove(self, pred, succ):
- assert pred != succ
- try:
- preds = self._preds[succ]
- succs = self._succs[pred]
- except KeyError: # pragma: no cover
- raise ValueError('%r not a successor of anything' % succ)
- try:
- preds.remove(pred)
- succs.remove(succ)
- except KeyError: # pragma: no cover
- raise ValueError('%r not a successor of %r' % (succ, pred))
-
- def is_step(self, step):
- return (step in self._preds or step in self._succs or
- step in self._nodes)
-
- def get_steps(self, final):
- if not self.is_step(final):
- raise ValueError('Unknown: %r' % final)
- result = []
- todo = []
- seen = set()
- todo.append(final)
- while todo:
- step = todo.pop(0)
- if step in seen:
- # if a step was already seen,
- # move it to the end (so it will appear earlier
- # when reversed on return) ... but not for the
- # final step, as that would be confusing for
- # users
- if step != final:
- result.remove(step)
- result.append(step)
- else:
- seen.add(step)
- result.append(step)
- preds = self._preds.get(step, ())
- todo.extend(preds)
- return reversed(result)
-
- @property
- def strong_connections(self):
- #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
- index_counter = [0]
- stack = []
- lowlinks = {}
- index = {}
- result = []
-
- graph = self._succs
-
- def strongconnect(node):
- # set the depth index for this node to the smallest unused index
- index[node] = index_counter[0]
- lowlinks[node] = index_counter[0]
- index_counter[0] += 1
- stack.append(node)
-
- # Consider successors
- try:
- successors = graph[node]
- except Exception:
- successors = []
- for successor in successors:
- if successor not in lowlinks:
- # Successor has not yet been visited
- strongconnect(successor)
- lowlinks[node] = min(lowlinks[node],lowlinks[successor])
- elif successor in stack:
- # the successor is in the stack and hence in the current
- # strongly connected component (SCC)
- lowlinks[node] = min(lowlinks[node],index[successor])
-
- # If `node` is a root node, pop the stack and generate an SCC
- if lowlinks[node] == index[node]:
- connected_component = []
-
- while True:
- successor = stack.pop()
- connected_component.append(successor)
- if successor == node: break
- component = tuple(connected_component)
- # storing the result
- result.append(component)
-
- for node in graph:
- if node not in lowlinks:
- strongconnect(node)
-
- return result
-
- @property
- def dot(self):
- result = ['digraph G {']
- for succ in self._preds:
- preds = self._preds[succ]
- for pred in preds:
- result.append(' %s -> %s;' % (pred, succ))
- for node in self._nodes:
- result.append(' %s;' % node)
- result.append('}')
- return '\n'.join(result)
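A brief sketch of `Sequencer` ordering steps; `add(pred, succ)` records that `pred` must come before `succ` (step names are illustrative):

```python
from distlib.util import Sequencer

seq = Sequencer()
seq.add('build', 'test')
seq.add('test', 'release')
print(list(seq.get_steps('release')))   # ['build', 'test', 'release']
```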
-
-#
-# Unarchiving functionality for zip, tar, tgz, tbz, whl
-#
-
-ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
- '.tgz', '.tbz', '.whl')
-
-def unarchive(archive_filename, dest_dir, format=None, check=True):
-
- def check_path(path):
- if not isinstance(path, text_type):
- path = path.decode('utf-8')
- p = os.path.abspath(os.path.join(dest_dir, path))
- if not p.startswith(dest_dir) or p[plen] != os.sep:
- raise ValueError('path outside destination: %r' % p)
-
- dest_dir = os.path.abspath(dest_dir)
- plen = len(dest_dir)
- archive = None
- if format is None:
- if archive_filename.endswith(('.zip', '.whl')):
- format = 'zip'
- elif archive_filename.endswith(('.tar.gz', '.tgz')):
- format = 'tgz'
- mode = 'r:gz'
- elif archive_filename.endswith(('.tar.bz2', '.tbz')):
- format = 'tbz'
- mode = 'r:bz2'
- elif archive_filename.endswith('.tar'):
- format = 'tar'
- mode = 'r'
- else: # pragma: no cover
- raise ValueError('Unknown format for %r' % archive_filename)
- try:
- if format == 'zip':
- archive = ZipFile(archive_filename, 'r')
- if check:
- names = archive.namelist()
- for name in names:
- check_path(name)
- else:
- archive = tarfile.open(archive_filename, mode)
- if check:
- names = archive.getnames()
- for name in names:
- check_path(name)
- if format != 'zip' and sys.version_info[0] < 3:
- # See Python issue 17153. If the dest path contains Unicode,
- # tarfile extraction fails on Python 2.x if a member path name
- # contains non-ASCII characters - it leads to an implicit
- # bytes -> unicode conversion using ASCII to decode.
- for tarinfo in archive.getmembers():
- if not isinstance(tarinfo.name, text_type):
- tarinfo.name = tarinfo.name.decode('utf-8')
- archive.extractall(dest_dir)
-
- finally:
- if archive:
- archive.close()
-
-
-def zip_dir(directory):
- """zip a directory tree into a BytesIO object"""
- result = io.BytesIO()
- dlen = len(directory)
- with ZipFile(result, "w") as zf:
- for root, dirs, files in os.walk(directory):
- for name in files:
- full = os.path.join(root, name)
- rel = root[dlen:]
- dest = os.path.join(rel, name)
- zf.write(full, dest)
- return result
-
-#
-# Simple progress bar
-#
-
-UNITS = ('', 'K', 'M', 'G','T','P')
-
-
-class Progress(object):
- unknown = 'UNKNOWN'
-
- def __init__(self, minval=0, maxval=100):
- assert maxval is None or maxval >= minval
- self.min = self.cur = minval
- self.max = maxval
- self.started = None
- self.elapsed = 0
- self.done = False
-
- def update(self, curval):
- assert self.min <= curval
- assert self.max is None or curval <= self.max
- self.cur = curval
- now = time.time()
- if self.started is None:
- self.started = now
- else:
- self.elapsed = now - self.started
-
- def increment(self, incr):
- assert incr >= 0
- self.update(self.cur + incr)
-
- def start(self):
- self.update(self.min)
- return self
-
- def stop(self):
- if self.max is not None:
- self.update(self.max)
- self.done = True
-
- @property
- def maximum(self):
- return self.unknown if self.max is None else self.max
-
- @property
- def percentage(self):
- if self.done:
- result = '100 %'
- elif self.max is None:
- result = ' ?? %'
- else:
- v = 100.0 * (self.cur - self.min) / (self.max - self.min)
- result = '%3d %%' % v
- return result
-
- def format_duration(self, duration):
- if (duration <= 0) and self.max is None or self.cur == self.min:
- result = '??:??:??'
- #elif duration < 1:
- # result = '--:--:--'
- else:
- result = time.strftime('%H:%M:%S', time.gmtime(duration))
- return result
-
- @property
- def ETA(self):
- if self.done:
- prefix = 'Done'
- t = self.elapsed
- #import pdb; pdb.set_trace()
- else:
- prefix = 'ETA '
- if self.max is None:
- t = -1
- elif self.elapsed == 0 or (self.cur == self.min):
- t = 0
- else:
- #import pdb; pdb.set_trace()
- t = float(self.max - self.min)
- t /= self.cur - self.min
- t = (t - 1) * self.elapsed
- return '%s: %s' % (prefix, self.format_duration(t))
-
- @property
- def speed(self):
- if self.elapsed == 0:
- result = 0.0
- else:
- result = (self.cur - self.min) / self.elapsed
- for unit in UNITS:
- if result < 1000:
- break
- result /= 1000.0
- return '%d %sB/s' % (result, unit)
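A small sketch of this `Progress` class with arbitrary values:

```python
from distlib.util import Progress

p = Progress(maxval=200).start()
p.increment(50)
print(p.percentage)   # ' 25 %'
```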
-
-#
-# Glob functionality
-#
-
-RICH_GLOB = re.compile(r'\{([^}]*)\}')
-_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
-_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
-
-
-def iglob(path_glob):
- """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
- if _CHECK_RECURSIVE_GLOB.search(path_glob):
- msg = """invalid glob %r: recursive glob "**" must be used alone"""
- raise ValueError(msg % path_glob)
- if _CHECK_MISMATCH_SET.search(path_glob):
- msg = """invalid glob %r: mismatching set marker '{' or '}'"""
- raise ValueError(msg % path_glob)
- return _iglob(path_glob)
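Illustrative patterns for `iglob` (the paths are hypothetical); `**` recurses into subdirectories and `{a,b}` expands into alternatives:

```python
from distlib.util import iglob

for path in iglob('src/**/*.py'):       # recursive match under src/
    print(path)

list(iglob('docs/{index,api}.rst'))     # brace-set expansion
```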
-
-
-def _iglob(path_glob):
- rich_path_glob = RICH_GLOB.split(path_glob, 1)
- if len(rich_path_glob) > 1:
- assert len(rich_path_glob) == 3, rich_path_glob
- prefix, set, suffix = rich_path_glob
- for item in set.split(','):
- for path in _iglob(''.join((prefix, item, suffix))):
- yield path
- else:
- if '**' not in path_glob:
- for item in std_iglob(path_glob):
- yield item
- else:
- prefix, radical = path_glob.split('**', 1)
- if prefix == '':
- prefix = '.'
- if radical == '':
- radical = '*'
- else:
- # we support both
- radical = radical.lstrip('/')
- radical = radical.lstrip('\\')
- for path, dir, files in os.walk(prefix):
- path = os.path.normpath(path)
- for fn in _iglob(os.path.join(path, radical)):
- yield fn
-
-if ssl:
- from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
- CertificateError)
-
-
-#
-# HTTPSConnection which verifies certificates/matches domains
-#
-
- class HTTPSConnection(httplib.HTTPSConnection):
- ca_certs = None # set this to the path to the certs file (.pem)
- check_domain = True # only used if ca_certs is not None
-
- # noinspection PyPropertyAccess
- def connect(self):
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if getattr(self, '_tunnel_host', False):
- self.sock = sock
- self._tunnel()
-
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- if hasattr(ssl, 'OP_NO_SSLv2'):
- context.options |= ssl.OP_NO_SSLv2
- if self.cert_file:
- context.load_cert_chain(self.cert_file, self.key_file)
- kwargs = {}
- if self.ca_certs:
- context.verify_mode = ssl.CERT_REQUIRED
- context.load_verify_locations(cafile=self.ca_certs)
- if getattr(ssl, 'HAS_SNI', False):
- kwargs['server_hostname'] = self.host
-
- self.sock = context.wrap_socket(sock, **kwargs)
- if self.ca_certs and self.check_domain:
- try:
- match_hostname(self.sock.getpeercert(), self.host)
- logger.debug('Host verified: %s', self.host)
- except CertificateError: # pragma: no cover
- self.sock.shutdown(socket.SHUT_RDWR)
- self.sock.close()
- raise
-
- class HTTPSHandler(BaseHTTPSHandler):
- def __init__(self, ca_certs, check_domain=True):
- BaseHTTPSHandler.__init__(self)
- self.ca_certs = ca_certs
- self.check_domain = check_domain
-
- def _conn_maker(self, *args, **kwargs):
- """
- This is called to create a connection instance. Normally you'd
- pass a connection class to do_open, but it doesn't actually check for
- a class, and just expects a callable. As long as we behave just as a
- constructor would have, we should be OK. If it ever changes so that
- we *must* pass a class, we'll create an UnsafeHTTPSConnection class
- which just sets check_domain to False in the class definition, and
- choose which one to pass to do_open.
- """
- result = HTTPSConnection(*args, **kwargs)
- if self.ca_certs:
- result.ca_certs = self.ca_certs
- result.check_domain = self.check_domain
- return result
-
- def https_open(self, req):
- try:
- return self.do_open(self._conn_maker, req)
- except URLError as e:
- if 'certificate verify failed' in str(e.reason):
- raise CertificateError('Unable to verify server certificate '
- 'for %s' % req.host)
- else:
- raise
-
- #
- # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
- # HTML containing a http://xyz link when it should be https://xyz),
- # you can use the following handler class, which does not allow HTTP traffic.
- #
- # It works by inheriting from HTTPHandler - so build_opener won't add a
- # handler for HTTP itself.
- #
- class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
- def http_open(self, req):
- raise URLError('Unexpected HTTP request on what should be a secure '
- 'connection: %s' % req)
-
-#
-# XML-RPC with timeouts
-#
-class Transport(xmlrpclib.Transport):
- def __init__(self, timeout, use_datetime=0):
- self.timeout = timeout
- xmlrpclib.Transport.__init__(self, use_datetime)
-
- def make_connection(self, host):
- h, eh, x509 = self.get_host_info(host)
- if not self._connection or host != self._connection[0]:
- self._extra_headers = eh
- self._connection = host, httplib.HTTPConnection(h)
- return self._connection[1]
-
-if ssl:
- class SafeTransport(xmlrpclib.SafeTransport):
- def __init__(self, timeout, use_datetime=0):
- self.timeout = timeout
- xmlrpclib.SafeTransport.__init__(self, use_datetime)
-
- def make_connection(self, host):
- h, eh, kwargs = self.get_host_info(host)
- if not kwargs:
- kwargs = {}
- kwargs['timeout'] = self.timeout
- if not self._connection or host != self._connection[0]:
- self._extra_headers = eh
- self._connection = host, httplib.HTTPSConnection(h, None,
- **kwargs)
- return self._connection[1]
-
-
-class ServerProxy(xmlrpclib.ServerProxy):
- def __init__(self, uri, **kwargs):
- self.timeout = timeout = kwargs.pop('timeout', None)
- # The above classes only come into play if a timeout
- # is specified
- if timeout is not None:
- # scheme = splittype(uri) # deprecated as of Python 3.8
- scheme = urlparse(uri)[0]
- use_datetime = kwargs.get('use_datetime', 0)
- if scheme == 'https':
- tcls = SafeTransport
- else:
- tcls = Transport
- kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
- self.transport = t
- xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
-
-#
-# CSV functionality. This is provided because on 2.x, the csv module can't
-# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
-#
-
-def _csv_open(fn, mode, **kwargs):
- if sys.version_info[0] < 3:
- mode += 'b'
- else:
- kwargs['newline'] = ''
- # Python 3 determines encoding from locale. Force 'utf-8'
- # file encoding to match other forced utf-8 encoding
- kwargs['encoding'] = 'utf-8'
- return open(fn, mode, **kwargs)
-
-
-class CSVBase(object):
- defaults = {
- 'delimiter': str(','), # The strs are used because we need native
- 'quotechar': str('"'), # str in the csv API (2.x won't take
- 'lineterminator': str('\n') # Unicode)
- }
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc_info):
- self.stream.close()
-
-
-class CSVReader(CSVBase):
- def __init__(self, **kwargs):
- if 'stream' in kwargs:
- stream = kwargs['stream']
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getreader('utf-8')(stream)
- self.stream = stream
- else:
- self.stream = _csv_open(kwargs['path'], 'r')
- self.reader = csv.reader(self.stream, **self.defaults)
-
- def __iter__(self):
- return self
-
- def next(self):
- result = next(self.reader)
- if sys.version_info[0] < 3:
- for i, item in enumerate(result):
- if not isinstance(item, text_type):
- result[i] = item.decode('utf-8')
- return result
-
- __next__ = next
-
-class CSVWriter(CSVBase):
- def __init__(self, fn, **kwargs):
- self.stream = _csv_open(fn, 'w')
- self.writer = csv.writer(self.stream, **self.defaults)
-
- def writerow(self, row):
- if sys.version_info[0] < 3:
- r = []
- for item in row:
- if isinstance(item, text_type):
- item = item.encode('utf-8')
- r.append(item)
- row = r
- self.writer.writerow(row)
-
-#
-# Configurator functionality
-#
-
-class Configurator(BaseConfigurator):
-
- value_converters = dict(BaseConfigurator.value_converters)
- value_converters['inc'] = 'inc_convert'
-
- def __init__(self, config, base=None):
- super(Configurator, self).__init__(config)
- self.base = base or os.getcwd()
-
- def configure_custom(self, config):
- def convert(o):
- if isinstance(o, (list, tuple)):
- result = type(o)([convert(i) for i in o])
- elif isinstance(o, dict):
- if '()' in o:
- result = self.configure_custom(o)
- else:
- result = {}
- for k in o:
- result[k] = convert(o[k])
- else:
- result = self.convert(o)
- return result
-
- c = config.pop('()')
- if not callable(c):
- c = self.resolve(c)
- props = config.pop('.', None)
- # Check for valid identifiers
- args = config.pop('[]', ())
- if args:
- args = tuple([convert(o) for o in args])
- items = [(k, convert(config[k])) for k in config if valid_ident(k)]
- kwargs = dict(items)
- result = c(*args, **kwargs)
- if props:
- for n, v in props.items():
- setattr(result, n, convert(v))
- return result
-
- def __getitem__(self, key):
- result = self.config[key]
- if isinstance(result, dict) and '()' in result:
- self.config[key] = result = self.configure_custom(result)
- return result
-
- def inc_convert(self, value):
- """Default converter for the inc:// protocol."""
- if not os.path.isabs(value):
- value = os.path.join(self.base, value)
- with codecs.open(value, 'r', encoding='utf-8') as f:
- result = json.load(f)
- return result
-
-
-class SubprocessMixin(object):
- """
- Mixin for running subprocesses and capturing their output
- """
- def __init__(self, verbose=False, progress=None):
- self.verbose = verbose
- self.progress = progress
-
- def reader(self, stream, context):
- """
- Read lines from a subprocess' output stream and either pass to a progress
- callable (if specified) or write progress information to sys.stderr.
- """
- progress = self.progress
- verbose = self.verbose
- while True:
- s = stream.readline()
- if not s:
- break
- if progress is not None:
- progress(s, context)
- else:
- if not verbose:
- sys.stderr.write('.')
- else:
- sys.stderr.write(s.decode('utf-8'))
- sys.stderr.flush()
- stream.close()
-
- def run_command(self, cmd, **kwargs):
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, **kwargs)
- t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
- t1.start()
- t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
- t2.start()
- p.wait()
- t1.join()
- t2.join()
- if self.progress is not None:
- self.progress('done.', 'main')
- elif self.verbose:
- sys.stderr.write('done.\n')
- return p
-
-
-def normalize_name(name):
- """Normalize a python package name a la PEP 503"""
- # https://www.python.org/dev/peps/pep-0503/#normalized-names
- return re.sub('[-_.]+', '-', name).lower()
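For example (the project names are arbitrary):

```python
from distlib.util import normalize_name

normalize_name('Flask_SQLAlchemy')   # -> 'flask-sqlalchemy'
normalize_name('zope.interface')     # -> 'zope-interface'
```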
-
-# def _get_pypirc_command():
- # """
- # Get the distutils command for interacting with PyPI configurations.
- # :return: the command.
- # """
- # from distutils.core import Distribution
- # from distutils.config import PyPIRCCommand
- # d = Distribution()
- # return PyPIRCCommand(d)
-
-class PyPIRCFile(object):
-
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
- DEFAULT_REALM = 'pypi'
-
- def __init__(self, fn=None, url=None):
- if fn is None:
- fn = os.path.join(os.path.expanduser('~'), '.pypirc')
- self.filename = fn
- self.url = url
-
- def read(self):
- result = {}
-
- if os.path.exists(self.filename):
- repository = self.url or self.DEFAULT_REPOSITORY
-
- config = configparser.RawConfigParser()
- config.read(self.filename)
- sections = config.sections()
- if 'distutils' in sections:
- # let's get the list of servers
- index_servers = config.get('distutils', 'index-servers')
- _servers = [server.strip() for server in
- index_servers.split('\n')
- if server.strip() != '']
- if _servers == []:
- # nothing set, let's try to get the default pypi
- if 'pypi' in sections:
- _servers = ['pypi']
- else:
- for server in _servers:
- result = {'server': server}
- result['username'] = config.get(server, 'username')
-
- # optional params
- for key, default in (('repository', self.DEFAULT_REPOSITORY),
- ('realm', self.DEFAULT_REALM),
- ('password', None)):
- if config.has_option(server, key):
- result[key] = config.get(server, key)
- else:
- result[key] = default
-
- # work around people having "repository" for the "pypi"
- # section of their config set to the HTTP (rather than
- # HTTPS) URL
- if (server == 'pypi' and
- repository in (self.DEFAULT_REPOSITORY, 'pypi')):
- result['repository'] = self.DEFAULT_REPOSITORY
- elif (result['server'] != repository and
- result['repository'] != repository):
- result = {}
- elif 'server-login' in sections:
- # old format
- server = 'server-login'
- if config.has_option(server, 'repository'):
- repository = config.get(server, 'repository')
- else:
- repository = self.DEFAULT_REPOSITORY
- result = {
- 'username': config.get(server, 'username'),
- 'password': config.get(server, 'password'),
- 'repository': repository,
- 'server': server,
- 'realm': self.DEFAULT_REALM
- }
- return result
-
- def update(self, username, password):
- # import pdb; pdb.set_trace()
- config = configparser.RawConfigParser()
- fn = self.filename
- config.read(fn)
- if not config.has_section('pypi'):
- config.add_section('pypi')
- config.set('pypi', 'username', username)
- config.set('pypi', 'password', password)
- with open(fn, 'w') as f:
- config.write(f)
-
-def _load_pypirc(index):
- """
- Read the PyPI access configuration as supported by distutils.
- """
- return PyPIRCFile(url=index.url).read()
-
-def _store_pypirc(index):
- PyPIRCFile().update(index.username, index.password)
-
-#
-# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
-# tweaks
-#
-
-def get_host_platform():
- """Return a string that identifies the current platform. This is used mainly to
- distinguish platform-specific build directories and platform-specific built
- distributions. Typically includes the OS name and version and the
- architecture (as supplied by 'os.uname()'), although the exact information
- included depends on the OS; eg. on Linux, the kernel version isn't
- particularly important.
-
- Examples of returned values:
- linux-i586
- linux-alpha (?)
- solaris-2.6-sun4u
-
- Windows will return one of:
- win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
- win32 (all others - specifically, sys.platform is returned)
-
- For other non-POSIX platforms, currently just returns 'sys.platform'.
-
- """
- if os.name == 'nt':
- if 'amd64' in sys.version.lower():
- return 'win-amd64'
- if '(arm)' in sys.version.lower():
- return 'win-arm32'
- if '(arm64)' in sys.version.lower():
- return 'win-arm64'
- return sys.platform
-
- # Set for cross builds explicitly
- if "_PYTHON_HOST_PLATFORM" in os.environ:
- return os.environ["_PYTHON_HOST_PLATFORM"]
-
- if os.name != 'posix' or not hasattr(os, 'uname'):
- # XXX what about the architecture? NT is Intel or Alpha,
- # Mac OS is M68k or PPC, etc.
- return sys.platform
-
- # Try to distinguish various flavours of Unix
-
- (osname, host, release, version, machine) = os.uname()
-
- # Convert the OS name to lowercase, remove '/' characters, and translate
- # spaces (for "Power Macintosh")
- osname = osname.lower().replace('/', '')
- machine = machine.replace(' ', '_').replace('/', '-')
-
- if osname[:5] == 'linux':
- # At least on Linux/Intel, 'machine' is the processor --
- # i386, etc.
- # XXX what about Alpha, SPARC, etc?
- return "%s-%s" % (osname, machine)
-
- elif osname[:5] == 'sunos':
- if release[0] >= '5': # SunOS 5 == Solaris 2
- osname = 'solaris'
- release = '%d.%s' % (int(release[0]) - 3, release[2:])
- # We can't use 'platform.architecture()[0]' because of a
- # bootstrap problem. We use a dict to get an error
- # if something suspicious happens.
- bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
- machine += '.%s' % bitness[sys.maxsize]
- # fall through to standard osname-release-machine representation
- elif osname[:3] == 'aix':
- from _aix_support import aix_platform
- return aix_platform()
- elif osname[:6] == 'cygwin':
- osname = 'cygwin'
- rel_re = re.compile (r'[\d.]+', re.ASCII)
- m = rel_re.match(release)
- if m:
- release = m.group()
- elif osname[:6] == 'darwin':
- import _osx_support, distutils.sysconfig
- osname, release, machine = _osx_support.get_platform_osx(
- distutils.sysconfig.get_config_vars(),
- osname, release, machine)
-
- return '%s-%s-%s' % (osname, release, machine)
-
-
-_TARGET_TO_PLAT = {
- 'x86' : 'win32',
- 'x64' : 'win-amd64',
- 'arm' : 'win-arm32',
-}
-
-
-def get_platform():
- if os.name != 'nt':
- return get_host_platform()
- cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
- if cross_compilation_target not in _TARGET_TO_PLAT:
- return get_host_platform()
- return _TARGET_TO_PLAT[cross_compilation_target]
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/progress/__init__.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/progress/__init__.py
deleted file mode 100644
index b434b300ad75a980774c64d5b22edaad4ac83dc1..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/progress/__init__.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright (c) 2012 Georgios Verigakis
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-from __future__ import division, print_function
-
-from collections import deque
-from datetime import timedelta
-from math import ceil
-from sys import stderr
-try:
- from time import monotonic
-except ImportError:
- from time import time as monotonic
-
-
-__version__ = '1.6'
-
-HIDE_CURSOR = '\x1b[?25l'
-SHOW_CURSOR = '\x1b[?25h'
-
-
-class Infinite(object):
- file = stderr
- sma_window = 10 # Simple Moving Average window
- check_tty = True
- hide_cursor = True
-
- def __init__(self, message='', **kwargs):
- self.index = 0
- self.start_ts = monotonic()
- self.avg = 0
- self._avg_update_ts = self.start_ts
- self._ts = self.start_ts
- self._xput = deque(maxlen=self.sma_window)
- for key, val in kwargs.items():
- setattr(self, key, val)
-
- self._max_width = 0
- self._hidden_cursor = False
- self.message = message
-
- if self.file and self.is_tty():
- if self.hide_cursor:
- print(HIDE_CURSOR, end='', file=self.file)
- self._hidden_cursor = True
- self.writeln('')
-
- def __del__(self):
- if self._hidden_cursor:
- print(SHOW_CURSOR, end='', file=self.file)
-
- def __getitem__(self, key):
- if key.startswith('_'):
- return None
- return getattr(self, key, None)
-
- @property
- def elapsed(self):
- return int(monotonic() - self.start_ts)
-
- @property
- def elapsed_td(self):
- return timedelta(seconds=self.elapsed)
-
- def update_avg(self, n, dt):
- if n > 0:
- xput_len = len(self._xput)
- self._xput.append(dt / n)
- now = monotonic()
- # update when we're still filling _xput, then after every second
- if (xput_len < self.sma_window or
- now - self._avg_update_ts > 1):
- self.avg = sum(self._xput) / len(self._xput)
- self._avg_update_ts = now
-
- def update(self):
- pass
-
- def start(self):
- pass
-
- def writeln(self, line):
- if self.file and self.is_tty():
- width = len(line)
- if width < self._max_width:
- # Add padding to cover previous contents
- line += ' ' * (self._max_width - width)
- else:
- self._max_width = width
- print('\r' + line, end='', file=self.file)
- self.file.flush()
-
- def finish(self):
- if self.file and self.is_tty():
- print(file=self.file)
- if self._hidden_cursor:
- print(SHOW_CURSOR, end='', file=self.file)
- self._hidden_cursor = False
-
- def is_tty(self):
- try:
- return self.file.isatty() if self.check_tty else True
- except AttributeError:
- msg = "%s has no attribute 'isatty'. Try setting check_tty=False." % self
- raise AttributeError(msg)
-
- def next(self, n=1):
- now = monotonic()
- dt = now - self._ts
- self.update_avg(n, dt)
- self._ts = now
- self.index = self.index + n
- self.update()
-
- def iter(self, it):
- self.iter_value = None
- with self:
- for x in it:
- self.iter_value = x
- yield x
- self.next()
- del self.iter_value
-
- def __enter__(self):
- self.start()
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.finish()
-
-
-class Progress(Infinite):
- def __init__(self, *args, **kwargs):
- super(Progress, self).__init__(*args, **kwargs)
- self.max = kwargs.get('max', 100)
-
- @property
- def eta(self):
- return int(ceil(self.avg * self.remaining))
-
- @property
- def eta_td(self):
- return timedelta(seconds=self.eta)
-
- @property
- def percent(self):
- return self.progress * 100
-
- @property
- def progress(self):
- if self.max == 0:
- return 0
- return min(1, self.index / self.max)
-
- @property
- def remaining(self):
- return max(self.max - self.index, 0)
-
- def start(self):
- self.update()
-
- def goto(self, index):
- incr = index - self.index
- self.next(incr)
-
- def iter(self, it):
- try:
- self.max = len(it)
- except TypeError:
- pass
-
- self.iter_value = None
- with self:
- for x in it:
- self.iter_value = x
- yield x
- self.next()
- del self.iter_value
diff --git a/spaces/ali-ghamdan/deoldify/fastai/gen_doc/doctest.py b/spaces/ali-ghamdan/deoldify/fastai/gen_doc/doctest.py
deleted file mode 100644
index 13dbcc20a4c7575cc16f65c8d18cd43515faa74f..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/deoldify/fastai/gen_doc/doctest.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import sys, re, json, pprint
-from pathlib import Path
-from collections import defaultdict
-from inspect import currentframe, getframeinfo, ismodule
-
-__all__ = ['this_tests']
-
-DB_NAME = 'test_registry.json'
-
-def _json_set_default(obj):
- if isinstance(obj, set): return list(obj)
- raise TypeError
-
-class TestRegistry:
- "Tests register which API they validate using this class."
- registry = defaultdict(list)
- this_tests_check = None
- missing_this_tests = set()
-
- # logic for checking whether each test calls `this_tests`:
- # 1. `this_tests_check` is set to True during test's 'setup' stage if it wasn't skipped
- # 2. if the test is dynamically skipped `this_tests_check` is set to False
- # 3. `this_tests` sets this flag to False when it successfully completes
- # 4. if during the 'teardown' stage `this_tests_check` is still True then we
- # know that this test needs a `this_tests` call
-
- @staticmethod
- def this_tests(*funcs):
- prev_frame = currentframe().f_back.f_back
- file_name, lineno, test_name, _, _ = getframeinfo(prev_frame)
- parent_func_lineno, _ = get_parent_func(lineno, get_lines(file_name))
- entry = {'file': relative_test_path(file_name), 'test': test_name , 'line': parent_func_lineno}
- for func in funcs:
- if func == 'na':
- # special case when we can't find a function to declare, e.g.
- # when attributes are tested
- continue
- try:
- func_fq = get_func_fq_name(func)
- except:
- raise Exception(f"'{func}' is not a function") from None
- if re.match(r'fastai\.', func_fq):
- if entry not in TestRegistry.registry[func_fq]:
- TestRegistry.registry[func_fq].append(entry)
- else:
- raise Exception(f"'{func}' is not in the fastai API") from None
- TestRegistry.this_tests_check = False
-
- def this_tests_check_on():
- TestRegistry.this_tests_check = True
-
- def this_tests_check_off():
- TestRegistry.this_tests_check = False
-
- def this_tests_check_run(file_name, test_name):
- if TestRegistry.this_tests_check:
- TestRegistry.missing_this_tests.add(f"{file_name}::{test_name}")
-
- def registry_save():
- if TestRegistry.registry:
- path = Path(__file__).parent.parent.resolve()/DB_NAME
- if path.exists():
- #print("\n*** Merging with the existing test registry")
- with open(path, 'r') as f: old_registry = json.load(f)
- TestRegistry.registry = merge_registries(old_registry, TestRegistry.registry)
- #print(f"\n*** Saving test registry @ {path}")
- with open(path, 'w') as f:
- json.dump(obj=TestRegistry.registry, fp=f, indent=4, sort_keys=True, default=_json_set_default)
-
- def missing_this_tests_alert():
- if TestRegistry.missing_this_tests:
- tests = '\n '.join(sorted(TestRegistry.missing_this_tests))
- print(f"""
-*** Attention ***
-Please include a `this_tests` call in each of the following tests:
- {tests}
-For details see: https://docs.fast.ai/dev/test.html#test-registry""")
-
-# merge_registries helpers
-# merge dict of lists of dict
-def a2k(a): return '::'.join([a['file'], a['test']]), a['line']
-def k2a(k, v): f,t = k.split('::'); return {"file": f, "line": v, "test": t}
-# merge by key that is a combination of 2 values: test, file
-def merge_lists(a, b):
- x = dict(map(a2k, [*a, *b])) # pack + merge
- return [k2a(k, v) for k,v in x.items()] # unpack
-def merge_registries(a, b):
- for i in b: a[i] = merge_lists(a[i], b[i]) if i in a else b[i]
- return a
-
-def this_tests(*funcs): TestRegistry.this_tests(*funcs)
-
-def str2func(name):
- "Converts 'fastai.foo.bar' into an function 'object' if such exists"
- if isinstance(name, str): subpaths = name.split('.')
- else: return None
-
- module = subpaths.pop(0)
- if module in sys.modules: obj = sys.modules[module]
- else: return None
-
- for subpath in subpaths:
- obj = getattr(obj, subpath, None)
- if obj == None: return None
- return obj
-
-def get_func_fq_name(func):
- if ismodule(func): return func.__name__
- if isinstance(func, str): func = str2func(func)
- name = None
- if hasattr(func, '__qualname__'): name = func.__qualname__
- elif hasattr(func, '__name__'): name = func.__name__
- elif hasattr(func, '__wrapped__'): return get_func_fq_name(func.__wrapped__)
- elif hasattr(func, '__class__'): name = func.__class__.__name__
- else: raise Exception(f"'{func}' is not a func or class")
- return f'{func.__module__}.{name}'
-
-def get_parent_func(lineno, lines, ignore_missing=False):
- "Find any lines where `elt` is called and return the parent test function"
- for idx,l in enumerate(reversed(lines[:lineno])):
- if re.match(r'\s*def test', l): return (lineno - idx), l # 1 based index for github
- if re.match(r'\w+', l): break # top level indent - out of function scope
- if ignore_missing: return None
- raise LookupError('Could not find parent function for line:', lineno, lines[:lineno])
-
-def relative_test_path(test_file:Path)->str:
- "Path relative to the `fastai` parent directory"
- test_file = Path(test_file)
- testdir_idx = list(reversed(test_file.parts)).index('tests')
- return '/'.join(test_file.parts[-(testdir_idx+1):])
-
-def get_lines(file):
- with open(file, 'r') as f: return f.readlines()
diff --git a/spaces/ali-ghamdan/gfp-Gans/gfpgan/weights/README.md b/spaces/ali-ghamdan/gfp-Gans/gfpgan/weights/README.md
deleted file mode 100644
index 4d7b7e642591ef88575d9e6c360a4d29e0cc1a4f..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/gfp-Gans/gfpgan/weights/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Weights
-
-Put the downloaded weights to this folder.
diff --git a/spaces/ali-ghamdan/realesrgan-models/docs/anime_model.md b/spaces/ali-ghamdan/realesrgan-models/docs/anime_model.md
deleted file mode 100644
index 244cd92e3b141ca162140f226885c7ca795b10fd..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/realesrgan-models/docs/anime_model.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Anime Model
-
-:white_check_mark: We add [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth), which is optimized for **anime** images and has a much smaller model size.
-
-- [How to Use](#how-to-use)
- - [PyTorch Inference](#pytorch-inference)
- - [ncnn Executable File](#ncnn-executable-file)
-- [Comparisons with waifu2x](#comparisons-with-waifu2x)
-- [Comparisons with Sliding Bars](#comparisons-with-sliding-bars)
-
-
-
-
-
-The following is a video comparison with a sliding bar. You may need to use full-screen mode for better visual quality, as the original image is large; otherwise, you may encounter aliasing issues.
-
-
-
-## How to Use
-
-### PyTorch Inference
-
-Pre-trained models: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)
-
-```bash
-# download model
-wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P experiments/pretrained_models
-# inference
-python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs
-```
-
-### ncnn Executable File
-
-Download the latest portable [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**.
-
-Taking Windows as an example, run:
-
-```bash
-./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n realesrgan-x4plus-anime
-```
-
-## Comparisons with waifu2x
-
-We compare Real-ESRGAN-anime with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan). We use `-n 2 -s 4` for waifu2x.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-## Comparisons with Sliding Bars
-
-The following are video comparisons with a sliding bar. You may need to use full-screen mode for better visual quality, as the original image is large; otherwise, you may encounter aliasing issues.
-
-
-
-
diff --git a/spaces/almakedon/faster-whisper-webui/cli.py b/spaces/almakedon/faster-whisper-webui/cli.py
deleted file mode 100644
index e0e21f2a6255db83bbc2c6e5ad08c56e85f7ac9b..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/cli.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import argparse
-import os
-import pathlib
-from urllib.parse import urlparse
-import warnings
-import numpy as np
-
-import torch
-from app import VadOptions, WhisperTranscriber
-from src.config import VAD_INITIAL_PROMPT_MODE_VALUES, ApplicationConfig, VadInitialPromptMode
-from src.download import download_url
-from src.languages import get_language_names
-
-from src.utils import optional_float, optional_int, str2bool
-from src.whisper.whisperFactory import create_whisper_container
-
-def cli():
- app_config = ApplicationConfig.create_default()
- whisper_models = app_config.get_model_names()
-
- # For the CLI, we fallback to saving the output to the current directory
- output_dir = app_config.output_dir if app_config.output_dir is not None else "."
-
- # Environment variable overrides
- default_whisper_implementation = os.environ.get("WHISPER_IMPLEMENTATION", app_config.whisper_implementation)
-
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument("audio", nargs="+", type=str, \
- help="audio file(s) to transcribe")
- parser.add_argument("--model", default=app_config.default_model_name, choices=whisper_models, \
- help="name of the Whisper model to use") # medium
- parser.add_argument("--model_dir", type=str, default=app_config.model_dir, \
- help="the path to save model files; uses ~/.cache/whisper by default")
- parser.add_argument("--device", default=app_config.device, \
- help="device to use for PyTorch inference")
- parser.add_argument("--output_dir", "-o", type=str, default=output_dir, \
- help="directory to save the outputs")
- parser.add_argument("--verbose", type=str2bool, default=app_config.verbose, \
- help="whether to print out the progress and debug messages")
- parser.add_argument("--whisper_implementation", type=str, default=default_whisper_implementation, choices=["whisper", "faster-whisper"],\
- help="the Whisper implementation to use")
-
- parser.add_argument("--task", type=str, default=app_config.task, choices=["transcribe", "translate"], \
- help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
- parser.add_argument("--language", type=str, default=app_config.language, choices=sorted(get_language_names()), \
- help="language spoken in the audio, specify None to perform language detection")
-
- parser.add_argument("--vad", type=str, default=app_config.default_vad, choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], \
- help="The voice activity detection algorithm to use") # silero-vad
- parser.add_argument("--vad_initial_prompt_mode", type=str, default=app_config.vad_initial_prompt_mode, choices=VAD_INITIAL_PROMPT_MODE_VALUES, \
- help="Whether or not to prepend the initial prompt to each VAD segment (prepend_all_segments), or just the first segment (prepend_first_segment)") # prepend_first_segment
- parser.add_argument("--vad_merge_window", type=optional_float, default=app_config.vad_merge_window, \
- help="The window size (in seconds) to merge voice segments")
- parser.add_argument("--vad_max_merge_size", type=optional_float, default=app_config.vad_max_merge_size,\
- help="The maximum size (in seconds) of a voice segment")
- parser.add_argument("--vad_padding", type=optional_float, default=app_config.vad_padding, \
- help="The padding (in seconds) to add to each voice segment")
- parser.add_argument("--vad_prompt_window", type=optional_float, default=app_config.vad_prompt_window, \
- help="The window size of the prompt to pass to Whisper")
- parser.add_argument("--vad_cpu_cores", type=int, default=app_config.vad_cpu_cores, \
- help="The number of CPU cores to use for VAD pre-processing.") # 1
- parser.add_argument("--vad_parallel_devices", type=str, default=app_config.vad_parallel_devices, \
- help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") # ""
- parser.add_argument("--auto_parallel", type=bool, default=app_config.auto_parallel, \
- help="True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") # False
-
- parser.add_argument("--temperature", type=float, default=app_config.temperature, \
- help="temperature to use for sampling")
- parser.add_argument("--best_of", type=optional_int, default=app_config.best_of, \
- help="number of candidates when sampling with non-zero temperature")
- parser.add_argument("--beam_size", type=optional_int, default=app_config.beam_size, \
- help="number of beams in beam search, only applicable when temperature is zero")
- parser.add_argument("--patience", type=float, default=app_config.patience, \
- help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
- parser.add_argument("--length_penalty", type=float, default=app_config.length_penalty, \
- help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple lengt normalization by default")
-
- parser.add_argument("--suppress_tokens", type=str, default=app_config.suppress_tokens, \
- help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
- parser.add_argument("--initial_prompt", type=str, default=app_config.initial_prompt, \
- help="optional text to provide as a prompt for the first window.")
- parser.add_argument("--condition_on_previous_text", type=str2bool, default=app_config.condition_on_previous_text, \
- help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
- parser.add_argument("--fp16", type=str2bool, default=app_config.fp16, \
- help="whether to perform inference in fp16; True by default")
- parser.add_argument("--compute_type", type=str, default=app_config.compute_type, choices=["default", "auto", "int8", "int8_float16", "int16", "float16", "float32"], \
- help="the compute type to use for inference")
-
- parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=app_config.temperature_increment_on_fallback, \
- help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
- parser.add_argument("--compression_ratio_threshold", type=optional_float, default=app_config.compression_ratio_threshold, \
- help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
- parser.add_argument("--logprob_threshold", type=optional_float, default=app_config.logprob_threshold, \
- help="if the average log probability is lower than this value, treat the decoding as failed")
- parser.add_argument("--no_speech_threshold", type=optional_float, default=app_config.no_speech_threshold, \
- help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
-
- parser.add_argument("--word_timestamps", type=str2bool, default=app_config.word_timestamps,
- help="(experimental) extract word-level timestamps and refine the results based on them")
- parser.add_argument("--prepend_punctuations", type=str, default=app_config.prepend_punctuations,
- help="if word_timestamps is True, merge these punctuation symbols with the next word")
- parser.add_argument("--append_punctuations", type=str, default=app_config.append_punctuations,
- help="if word_timestamps is True, merge these punctuation symbols with the previous word")
- parser.add_argument("--highlight_words", type=str2bool, default=app_config.highlight_words,
- help="(requires --word_timestamps True) underline each word as it is spoken in srt and vtt")
- parser.add_argument("--threads", type=optional_int, default=0,
- help="number of threads used by torch for CPU inference; supercedes MKL_NUM_THREADS/OMP_NUM_THREADS")
-
- args = parser.parse_args().__dict__
- model_name: str = args.pop("model")
- model_dir: str = args.pop("model_dir")
- output_dir: str = args.pop("output_dir")
- device: str = args.pop("device")
- os.makedirs(output_dir, exist_ok=True)
-
- if (threads := args.pop("threads")) > 0:
- torch.set_num_threads(threads)
-
- whisper_implementation = args.pop("whisper_implementation")
- print(f"Using {whisper_implementation} for Whisper")
-
- if model_name.endswith(".en") and args["language"] not in {"en", "English"}:
- warnings.warn(f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead.")
- args["language"] = "en"
-
- temperature = args.pop("temperature")
- temperature_increment_on_fallback = args.pop("temperature_increment_on_fallback")
- if temperature_increment_on_fallback is not None:
- temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback))
- else:
- temperature = [temperature]
-
- vad = args.pop("vad")
- vad_initial_prompt_mode = args.pop("vad_initial_prompt_mode")
- vad_merge_window = args.pop("vad_merge_window")
- vad_max_merge_size = args.pop("vad_max_merge_size")
- vad_padding = args.pop("vad_padding")
- vad_prompt_window = args.pop("vad_prompt_window")
- vad_cpu_cores = args.pop("vad_cpu_cores")
- auto_parallel = args.pop("auto_parallel")
-
- compute_type = args.pop("compute_type")
- highlight_words = args.pop("highlight_words")
-
- transcriber = WhisperTranscriber(delete_uploaded_files=False, vad_cpu_cores=vad_cpu_cores, app_config=app_config)
- transcriber.set_parallel_devices(args.pop("vad_parallel_devices"))
- transcriber.set_auto_parallel(auto_parallel)
-
- model = create_whisper_container(whisper_implementation=whisper_implementation, model_name=model_name,
- device=device, compute_type=compute_type, download_root=model_dir, models=app_config.models)
-
- if (transcriber._has_parallel_devices()):
- print("Using parallel devices:", transcriber.parallel_device_list)
-
- for audio_path in args.pop("audio"):
- sources = []
-
- # Detect URL and download the audio
- if (uri_validator(audio_path)):
- # Download from YouTube/URL directly
- for source_path in download_url(audio_path, maxDuration=-1, destinationDirectory=output_dir, playlistItems=None):
- source_name = os.path.basename(source_path)
- sources.append({ "path": source_path, "name": source_name })
- else:
- sources.append({ "path": audio_path, "name": os.path.basename(audio_path) })
-
- for source in sources:
- source_path = source["path"]
- source_name = source["name"]
-
- vadOptions = VadOptions(vad, vad_merge_window, vad_max_merge_size, vad_padding, vad_prompt_window,
- VadInitialPromptMode.from_string(vad_initial_prompt_mode))
-
- result = transcriber.transcribe_file(model, source_path, temperature=temperature, vadOptions=vadOptions, **args)
-
- transcriber.write_result(result, source_name, output_dir, highlight_words)
-
- transcriber.close()
-
-def uri_validator(x):
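-    # Treat the argument as a URL only if it parses with both a scheme and a network
-    # location; plain local file paths fall through and return False.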
- try:
- result = urlparse(x)
- return all([result.scheme, result.netloc])
- except:
- return False
-
-if __name__ == '__main__':
- cli()
\ No newline at end of file
diff --git a/spaces/almino/WhisperYoutube/README.md b/spaces/almino/WhisperYoutube/README.md
deleted file mode 100644
index a6a214290df3b2f6ce5d06ad0679ae5653f3950f..0000000000000000000000000000000000000000
--- a/spaces/almino/WhisperYoutube/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: WhisperYoutube
-emoji: 🐨
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules.py b/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules.py
deleted file mode 100644
index 6a684e0efdaff06fff7c18bd2d733e4ad19ba03f..0000000000000000000000000000000000000000
--- a/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import torch
-import torch.nn as nn
-from functools import partial
-import clip
-from einops import rearrange, repeat
-from transformers import CLIPTokenizer, CLIPTextModel
-import kornia
-
-from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
-
-def _expand_mask(mask, dtype, tgt_len = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- bsz, src_len = mask.size()
- tgt_len = tgt_len if tgt_len is not None else src_len
-
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
- inverted_mask = 1.0 - expanded_mask
-
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-def _build_causal_attention_mask(bsz, seq_len, dtype):
- # lazily create causal attention mask, with full attention between the vision tokens
- # pytorch uses additive attention mask; fill with -inf
- mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
- mask.fill_(torch.tensor(torch.finfo(dtype).min))
- mask.triu_(1) # zero out the lower diagonal
- mask = mask.unsqueeze(1) # expand mask
- return mask
-
-class AbstractEncoder(nn.Module):
- def __init__(self):
- super().__init__()
-
- def encode(self, *args, **kwargs):
- raise NotImplementedError
-
-
-
-class ClassEmbedder(nn.Module):
- def __init__(self, embed_dim, n_classes=1000, key='class'):
- super().__init__()
- self.key = key
- self.embedding = nn.Embedding(n_classes, embed_dim)
-
- def forward(self, batch, key=None):
- if key is None:
- key = self.key
- # this is for use in crossattn
- c = batch[key][:, None]
- c = self.embedding(c)
- return c
-
-
-class TransformerEmbedder(AbstractEncoder):
- """Some transformer encoder layers"""
- def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
- super().__init__()
- self.device = device
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
- attn_layers=Encoder(dim=n_embed, depth=n_layer))
-
- def forward(self, tokens):
- tokens = tokens.to(self.device) # meh
- z = self.transformer(tokens, return_embeddings=True)
- return z
-
- def encode(self, x):
- return self(x)
-
-
-class BERTTokenizer(AbstractEncoder):
- """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
- def __init__(self, device="cuda", vq_interface=True, max_length=77):
- super().__init__()
-        from transformers import BertTokenizerFast  # TODO: add to requirements
- self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
- self.device = device
- self.vq_interface = vq_interface
- self.max_length = max_length
-
- def forward(self, text):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- return tokens
-
- @torch.no_grad()
- def encode(self, text):
- tokens = self(text)
- if not self.vq_interface:
- return tokens
- return None, None, [None, None, tokens]
-
- def decode(self, text):
- return text
-
-
-class BERTEmbedder(AbstractEncoder):
-    """Uses the BERT tokenizer and adds some transformer encoder layers"""
- def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
- device="cuda",use_tokenizer=True, embedding_dropout=0.0):
- super().__init__()
- self.use_tknz_fn = use_tokenizer
- if self.use_tknz_fn:
- self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
- self.device = device
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
- attn_layers=Encoder(dim=n_embed, depth=n_layer),
- emb_dropout=embedding_dropout)
-
- def forward(self, text, embedding_manager=None):
- if self.use_tknz_fn:
- tokens = self.tknz_fn(text)#.to(self.device)
- else:
- tokens = text
- z = self.transformer(tokens, return_embeddings=True, embedding_manager=embedding_manager)
- return z
-
- def encode(self, text, **kwargs):
- # output of length 77
- return self(text, **kwargs)
-
-class SpatialRescaler(nn.Module):
- def __init__(self,
- n_stages=1,
- method='bilinear',
- multiplier=0.5,
- in_channels=3,
- out_channels=None,
- bias=False):
- super().__init__()
- self.n_stages = n_stages
- assert self.n_stages >= 0
- assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
- self.multiplier = multiplier
- self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
- self.remap_output = out_channels is not None
- if self.remap_output:
- print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
- self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
-
- def forward(self,x):
- for stage in range(self.n_stages):
- x = self.interpolator(x, scale_factor=self.multiplier)
-
-
- if self.remap_output:
- x = self.channel_mapper(x)
- return x
-
- def encode(self, x):
- return self(x)
-
-class FrozenCLIPEmbedder(AbstractEncoder):
- """Uses the CLIP transformer encoder for text (from Hugging Face)"""
- def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
- super().__init__()
- self.tokenizer = CLIPTokenizer.from_pretrained(version)
- self.transformer = CLIPTextModel.from_pretrained(version)
- self.device = device
- self.max_length = max_length
- self.freeze()
-
- def embedding_forward(
- self,
- input_ids = None,
- position_ids = None,
- inputs_embeds = None,
- embedding_manager = None,
- ) -> torch.Tensor:
-
- seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
-
- if position_ids is None:
- position_ids = self.position_ids[:, :seq_length]
-
- if inputs_embeds is None:
- inputs_embeds = self.token_embedding(input_ids)
-
- if embedding_manager is not None:
- inputs_embeds = embedding_manager(input_ids, inputs_embeds)
-
-
- position_embeddings = self.position_embedding(position_ids)
- embeddings = inputs_embeds + position_embeddings
-
- return embeddings
-
- self.transformer.text_model.embeddings.forward = embedding_forward.__get__(self.transformer.text_model.embeddings)
-
- def encoder_forward(
- self,
- inputs_embeds,
- attention_mask = None,
- causal_attention_mask = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- encoder_states = () if output_hidden_states else None
- all_attentions = () if output_attentions else None
-
- hidden_states = inputs_embeds
- for idx, encoder_layer in enumerate(self.layers):
- if output_hidden_states:
- encoder_states = encoder_states + (hidden_states,)
-
- layer_outputs = encoder_layer(
- hidden_states,
- attention_mask,
- causal_attention_mask,
- output_attentions=output_attentions,
- )
-
- hidden_states = layer_outputs[0]
-
- if output_attentions:
- all_attentions = all_attentions + (layer_outputs[1],)
-
- if output_hidden_states:
- encoder_states = encoder_states + (hidden_states,)
-
- return hidden_states
-
- self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder)
-
-
- def text_encoder_forward(
- self,
- input_ids = None,
- attention_mask = None,
- position_ids = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- embedding_manager = None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is None:
- raise ValueError("You have to specify either input_ids")
-
- input_shape = input_ids.size()
- input_ids = input_ids.view(-1, input_shape[-1])
-
- hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager)
-
- bsz, seq_len = input_shape
- # CLIP's text model uses causal mask, prepare it here.
- # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
- causal_attention_mask = _build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
- hidden_states.device
- )
-
- # expand attention_mask
- if attention_mask is not None:
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
-
- last_hidden_state = self.encoder(
- inputs_embeds=hidden_states,
- attention_mask=attention_mask,
- causal_attention_mask=causal_attention_mask,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- last_hidden_state = self.final_layer_norm(last_hidden_state)
-
- # pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)]
-
- return last_hidden_state
-
- self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model)
-
- def transformer_forward(
- self,
- input_ids = None,
- attention_mask = None,
- position_ids = None,
- output_attentions = None,
- output_hidden_states = None,
- return_dict = None,
- embedding_manager = None,
- ):
- return self.text_model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- embedding_manager = embedding_manager
- )
-
- self.transformer.forward = transformer_forward.__get__(self.transformer)
-
-
- def freeze(self):
- self.transformer = self.transformer.eval()
- # self.vit = self.vit.eval()
- for param in self.parameters():
- param.requires_grad = False
-
-
-
- def forward(self, text, **kwargs):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- z = self.transformer(input_ids=tokens, **kwargs)
- # from pdb import set_trace
- # set_trace()
- if kwargs.get('return_pooled', False):
- return z, z[torch.arange(z.shape[0]), tokens.argmax(dim=-1)]
- return z
-
- def encode(self, text, **kwargs):
- return self(text, **kwargs)
-
-
-
-class FrozenCLIPTextEmbedder(nn.Module):
- """
- Uses the CLIP transformer encoder for text.
- """
- def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
- super().__init__()
- self.model, _ = clip.load(version, jit=False, device="cpu")
- self.device = device
- self.max_length = max_length
- self.n_repeat = n_repeat
- self.normalize = normalize
-
- def freeze(self):
- self.model = self.model.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text):
- tokens = clip.tokenize(text).to(self.device)
- z = self.model.encode_text(tokens)
- if self.normalize:
- z = z / torch.linalg.norm(z, dim=1, keepdim=True)
- return z
-
- def encode(self, text):
- z = self(text)
- if z.ndim==2:
- z = z[:, None, :]
- z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
- return z
-
-
-class FrozenClipImageEmbedder(nn.Module):
- """
- Uses the CLIP image encoder.
- """
- def __init__(
- self,
- model,
- jit=False,
- device='cuda' if torch.cuda.is_available() else 'cpu',
- antialias=False,
- ):
- super().__init__()
- self.model, _ = clip.load(name=model, device=device, jit=jit)
-
- self.antialias = antialias
-
- self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
- self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
-
- def preprocess(self, x):
- # normalize to [0,1]
- x = kornia.geometry.resize(x, (224, 224),
- interpolation='bicubic',align_corners=True,
- antialias=self.antialias)
- x = (x + 1.) / 2.
- # renormalize according to clip
- x = kornia.enhance.normalize(x, self.mean, self.std)
- return x
-
- def forward(self, x):
- # x is assumed to be in range [-1,1]
- return self.model.encode_image(self.preprocess(x))
-
-
-if __name__ == "__main__":
- from ldm.util import count_params
- model = FrozenCLIPEmbedder()
- count_params(model, verbose=True)
\ No newline at end of file
diff --git a/spaces/andersab/QuijoBERT/README.md b/spaces/andersab/QuijoBERT/README.md
deleted file mode 100644
index 155b4b67aeefa817aa84900b4eec5c4899f58a84..0000000000000000000000000000000000000000
--- a/spaces/andersab/QuijoBERT/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: QuijoBERT
-emoji: 🏢
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-A fill-mask model built on El Quijote, in Spanish.
-
-The tokenizer was trained on the original text of both volumes, and the model was trained for 1000 epochs.
-
-Enjoy (the book and) the model.
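-
-For programmatic use, a minimal fill-mask sketch with the 🤗 `transformers` pipeline is shown below. The model id and the masked example sentence are assumptions (check the actual model repository and its tokenizer), not something stated in this README.
-
-```python
-from transformers import pipeline
-
-# Hypothetical model id: replace with the repository that actually backs this Space.
-unmasker = pipeline("fill-mask", model="andersab/QuijoBERT")
-
-# Use the tokenizer's own mask token instead of hard-coding "[MASK]" or "<mask>".
-mask = unmasker.tokenizer.mask_token
-for prediction in unmasker(f"En un lugar de la Mancha, de cuyo nombre no quiero {mask}."):
-    print(prediction["token_str"], round(prediction["score"], 3))
-```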
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/andreasmartin/faq/app.py b/spaces/andreasmartin/faq/app.py
deleted file mode 100644
index b4c76ef65d74fe54e2e4a64feb1ea66d1f53e03c..0000000000000000000000000000000000000000
--- a/spaces/andreasmartin/faq/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from fastapi import FastAPI
-from pydantic import BaseModel
-import faq as faq
-import util as util
-import uvicorn
-import gradio as gr
-from typing import List, Optional
-from fastapi.responses import JSONResponse
-
-app = FastAPI()
-
-
-class Request(BaseModel):
- question: Optional[str] = "?"
- sheet_url: str
- page_content_column: str
- k: Optional[int] = 20
- reload_collection: Optional[bool] = False
- id_column: Optional[str] = None
- synonyms: Optional[List[List[str]]] = None
-
-
-@app.post("/api")
-async def post_api(request: Request) -> JSONResponse:
- if request.id_column is not None:
- util.SPLIT_PAGE_BREAKS = True
- if request.synonyms is not None:
- util.SYNONYMS = request.synonyms
- vectordb = faq.load_vectordb(request.sheet_url, request.page_content_column)
- if request.reload_collection:
- faq.delete_vectordb_current_collection(vectordb)
- vectordb = faq.load_vectordb(request.sheet_url, request.page_content_column)
- documents = faq.similarity_search(vectordb, request.question, k=request.k)
- df_doc = util.transform_documents_to_dataframe(documents)
- if request.id_column is not None:
- df_doc = util.remove_duplicates_by_column(df_doc, request.id_column)
- return JSONResponse(util.dataframe_to_dict(df_doc))
-
-
-@app.put("/api")
-async def put_api(request: Request) -> bool:
- success = False
- if request.id_column is not None:
- util.SPLIT_PAGE_BREAKS = True
- if request.synonyms is not None:
- util.SYNONYMS = request.synonyms
- vectordb = faq.load_vectordb(request.sheet_url, request.page_content_column)
- if request.reload_collection:
- faq.delete_vectordb_current_collection(vectordb)
- vectordb = faq.load_vectordb(request.sheet_url, request.page_content_column)
- success = True
- return success
-
-
-@app.delete("/api")
-async def delete_vectordb_api() -> None:
- faq.delete_vectordb()
-
-
-def ask(sheet_url: str, page_content_column: str, k: int, reload_collection: bool, question: str):
- util.SPLIT_PAGE_BREAKS = False
- vectordb = faq.load_vectordb(sheet_url, page_content_column)
- if reload_collection:
- faq.delete_vectordb_current_collection(vectordb)
- vectordb = faq.load_vectordb(sheet_url, page_content_column)
- documents = faq.similarity_search(vectordb, question, k=k)
- df_doc = util.transform_documents_to_dataframe(documents)
- return util.dataframe_to_dict(df_doc), gr.Checkbox.update(False)
-
-
-with gr.Blocks() as block:
- sheet_url = gr.Textbox(label="Google Sheet URL")
- page_content_column = gr.Textbox(label="Question Column")
- k = gr.Slider(1, 30, step=1, label="K")
- reload_collection = gr.Checkbox(label="Reload Collection?")
- question = gr.Textbox(label="Question")
- ask_button = gr.Button("Ask")
- answer_output = gr.JSON(label="Answer")
- ask_button.click(
- ask,
- inputs=[sheet_url, page_content_column, k, reload_collection, question],
- outputs=[answer_output, reload_collection]
- )
-
-app = gr.mount_gradio_app(app, block, path="/")
-
-
-if __name__ == "__main__":
- uvicorn.run(app, host="0.0.0.0", port=7860)
diff --git a/spaces/artificialguybr/video-dubbing/TTS/docs/source/faq.md b/spaces/artificialguybr/video-dubbing/TTS/docs/source/faq.md
deleted file mode 100644
index fa48c4a9fbbaf1c77d847e4289645dceaf5aba91..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/docs/source/faq.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# Humble FAQ
-We tried to collect common issues and questions we receive about 🐸TTS. It is worth checking before going deeper.
-
-## Errors with a pre-trained model. How can I resolve this?
-- Make sure you use the right commit version of 🐸TTS. Each pre-trained model has its corresponding version that needs to be used. It is defined on the model table.
-- If it is still problematic, post your problem on [Discussions](https://github.com/coqui-ai/TTS/discussions). Please give as many details as possible (error message, your TTS version, your TTS model and config.json etc.)
-- If you feel like it's a bug to be fixed, then prefer Github issues with the same level of scrutiny.
-
-## What are the requirements of a good 🐸TTS dataset?
-* {ref}`See this page `
-
-## How should I choose the right model?
-- First, train Tacotron. It is smaller and faster to experiment with. If it performs poorly, try Tacotron2.
-- Tacotron models produce the most natural voice if your dataset is not too noisy.
-- If both models do not perform well and especially the attention does not align, then try AlignTTS or GlowTTS.
-- If you need faster models, consider SpeedySpeech, GlowTTS or AlignTTS. Keep in mind that SpeedySpeech requires a pre-trained Tacotron or Tacotron2 model to compute text-to-speech alignments.
-
-## How can I train my own `tts` model?
-0. Check your dataset with notebooks in [dataset_analysis](https://github.com/coqui-ai/TTS/tree/master/notebooks/dataset_analysis) folder. Use [this notebook](https://github.com/coqui-ai/TTS/blob/master/notebooks/dataset_analysis/CheckSpectrograms.ipynb) to find the right audio processing parameters. A better set of parameters results in a better audio synthesis.
-
-1. Write your own dataset `formatter` in `datasets/formatters.py` or format your dataset as one of the supported datasets, like LJSpeech.
-    A `formatter` parses the metadata file and converts it into a list of training samples (see the formatter sketch below).
-
-2. If you have a dataset with a different alphabet than English, you need to set your own character list in the ```config.json```.
- - If you use phonemes for training and your language is supported [here](https://github.com/rhasspy/gruut#supported-languages), you don't need to set your character list.
- - You can use `TTS/bin/find_unique_chars.py` to get characters used in your dataset.
-
-3. Write your own text cleaner in ```utils.text.cleaners```. It is not always necessary, except when you have a different alphabet or language-specific requirements.
- - A `cleaner` performs number and abbreviation expansion and text normalization. Basically, it converts the written text to its spoken format.
-    - If you want to keep it simple, you can try using ```basic_cleaners```.
-
-4. Fill in a ```config.json```. Go over each parameter one by one and consider it regarding the appended explanation.
- - Check the `Coqpit` class created for your target model. Coqpit classes for `tts` models are under `TTS/tts/configs/`.
- - You just need to define fields you need/want to change in your `config.json`. For the rest, their default values are used.
- - 'sample_rate', 'phoneme_language' (if phoneme enabled), 'output_path', 'datasets', 'text_cleaner' are the fields you need to edit in most of the cases.
- - Here is a sample `config.json` for training a `GlowTTS` network.
- ```json
- {
- "model": "glow_tts",
- "batch_size": 32,
- "eval_batch_size": 16,
- "num_loader_workers": 4,
- "num_eval_loader_workers": 4,
- "run_eval": true,
- "test_delay_epochs": -1,
- "epochs": 1000,
- "text_cleaner": "english_cleaners",
- "use_phonemes": false,
- "phoneme_language": "en-us",
- "phoneme_cache_path": "phoneme_cache",
- "print_step": 25,
- "print_eval": true,
- "mixed_precision": false,
- "output_path": "recipes/ljspeech/glow_tts/",
- "test_sentences": ["Test this sentence.", "This test sentence.", "Sentence this test."],
- "datasets":[{"formatter": "ljspeech", "meta_file_train":"metadata.csv", "path": "recipes/ljspeech/LJSpeech-1.1/"}]
- }
- ```
-
-5. Train your model.
- - SingleGPU training: ```CUDA_VISIBLE_DEVICES="0" python train_tts.py --config_path config.json```
- - MultiGPU training: ```python3 -m trainer.distribute --gpus "0,1" --script TTS/bin/train_tts.py --config_path config.json```
-
-**Note:** You can also train your model using pure 🐍 python. Check ```{eval-rst} :ref: 'tutorial_for_nervous_beginners'```.
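-
-As a concrete illustration of step 1, here is a minimal custom `formatter` sketch. It assumes a simple `metadata.csv` with `file_name|transcription` rows, and the returned dict keys follow the LJSpeech formatter convention (`text`, `audio_file`, `speaker_name`, `root_path`); double-check both assumptions against your 🐸TTS version.
-
-```python
-import os
-
-def my_dataset(root_path, meta_file, **kwargs):
-    """Parse `metadata.csv` rows of the form `file_name|transcription`."""
-    items = []
-    with open(os.path.join(root_path, meta_file), "r", encoding="utf-8") as f:
-        for line in f:
-            file_name, text = line.strip().split("|", maxsplit=1)
-            items.append({
-                "text": text,
-                "audio_file": os.path.join(root_path, "wavs", file_name + ".wav"),
-                "speaker_name": "my_speaker",
-                "root_path": root_path,
-            })
-    return items
-```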
-
-## How can I train in a different language?
-- Check steps 2, 3, 4, 5 above.
-
-## How can I train multi-GPUs?
-- Check step 5 above.
-
-## How can I check model performance?
-- You can inspect model training and performance using ```tensorboard```. It will show you loss, attention alignment, model output. Go with the order below to measure the model performance.
-1. Check ground truth spectrograms. If they do not look as they are supposed to, then check audio processing parameters in ```config.json```.
-2. Check train and eval losses and make sure that they all decrease smoothly in time.
-3. Check model spectrograms. Especially, training outputs should look similar to ground truth spectrograms after ~10K iterations.
-4. Your model would not work well at test time until the attention has a near diagonal alignment. This is the sublime art of TTS training.
- - Attention should converge diagonally after ~50K iterations.
-    - If attention does not converge, the likely causes are:
-      - Your dataset is too noisy or small.
-      - Samples are too long.
-      - Batch size is too small (batch_size < 32 tends to have a hard time converging).
- - You can also try other attention algorithms like 'graves', 'bidirectional_decoder', 'forward_attn'.
- - 'bidirectional_decoder' is your ultimate savior, but it trains 2x slower and demands 1.5x more GPU memory.
- - You can also try the other models like AlignTTS or GlowTTS.
-
-## How do I know when to stop training?
-There is no single objective metric to decide the end of training, since voice quality is a subjective matter.
-
-In our model trainings, we follow these steps:
-
-- Check whether the test-time audio outputs have stopped improving.
-- Check whether the test-time attention maps look clear and diagonal.
-- Check whether the validation loss has converged and gone down smoothly, or has started to go back up (overfitting).
-- If the answer is YES for all of the above, then test the model with a set of complex sentences. For English, you can use the `TestAttention` notebook.
-
-Keep in mind that the approach above only validates the model robustness. It is hard to estimate the voice quality without asking the actual people.
-The best approach is to pick a set of promising models and run a Mean-Opinion-Score study asking actual people to score the models.
-
-## My model does not learn. How can I debug?
-- Go over the steps under "How can I check model performance?"
-
-## Attention does not align. How can I make it work?
-- Check the 4th step under "How can I check model performance?"
-
-## How can I test a trained model?
-- The best way is to use `tts` or `tts-server` commands. For details check {ref}`here `.
-- If you need programmatic access, you can use the ```TTS.utils.synthesizer.Synthesizer``` class directly (a sketch is shown below).
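-
-A minimal sketch of the programmatic route follows. The checkpoint/config paths are placeholders, and the exact ```Synthesizer``` argument names can differ between 🐸TTS versions, so treat this as an outline rather than the definitive API.
-
-```python
-from TTS.utils.synthesizer import Synthesizer
-
-# Placeholder paths: point these at the outputs of your own training run.
-synthesizer = Synthesizer(
-    tts_checkpoint="output/best_model.pth",
-    tts_config_path="output/config.json",
-    use_cuda=False,
-)
-
-wav = synthesizer.tts("This is a test sentence.")  # synthesized waveform
-synthesizer.save_wav(wav, "test_output.wav")
-```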
-
-## My Tacotron model does not stop - I see "Decoder stopped with 'max_decoder_steps'" - Stopnet does not work.
-- In general, all of the above relates to the `stopnet`. It is the part of the model telling the `decoder` when to stop.
-- In general, a poor `stopnet` relates to something else that is broken in your model or dataset. Especially the attention module.
-- One common reason is the silent parts in the audio clips at the beginning and the ending. Check ```trim_db``` value in the config. You can find a better value for your dataset by using ```CheckSpectrogram``` notebook. If this value is too small, too much of the audio will be trimmed. If too big, then too much silence will remain. Both will curtail the `stopnet` performance.
diff --git a/spaces/awacke1/Art-Generator-and-Style-Mixer/examples/Readme.md b/spaces/awacke1/Art-Generator-and-Style-Mixer/examples/Readme.md
deleted file mode 100644
index fcc7658c86cfe6cc2a866a14df23b255c0723c39..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Art-Generator-and-Style-Mixer/examples/Readme.md
+++ /dev/null
@@ -1 +0,0 @@
-Add example images here.
\ No newline at end of file
diff --git a/spaces/awacke1/RLHF.Evals.Intake.Upvote.Downvote/app.py b/spaces/awacke1/RLHF.Evals.Intake.Upvote.Downvote/app.py
deleted file mode 100644
index 0f1b348df59042cc78f1e737452803efc48fb253..0000000000000000000000000000000000000000
--- a/spaces/awacke1/RLHF.Evals.Intake.Upvote.Downvote/app.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import streamlit as st
-import pandas as pd
-from py_thesaurus import Thesaurus
-import random
-import os.path
-
-def generate_sentence():
- words = ["apple", "banana", "grape", "orange", "watermelon", "pineapple", "cherry", "strawberry", "blueberry", "mango"]
- random_words = random.sample(words, 3)
- question = f"What did the {random_words[0]} say to the {random_words[1]}?"
- answer = f"The {random_words[0]} said, 'Let's hang out with the {random_words[2]}!'"
- context = f"In the context of a fruit gathering, the {random_words[0]}, {random_words[1]}, and {random_words[2]} were having fun."
- return f"{question} {answer} {context}"
-
-def replace_with_synonym(sentence):
- words = sentence.split()
- index = random.randint(0, len(words) - 1)
- word = words[index]
- synonyms = Thesaurus(word).get_synonym()
- if synonyms:
- replacement = random.choice(synonyms)
- words[index] = replacement
- return ' '.join(words)
-
-def load_or_create_scoreboard(filename):
- if os.path.isfile(filename):
- return pd.read_csv(filename)
- else:
- scoreboard = pd.DataFrame({'Upvotes': [0], 'Downvotes': [0]})
- scoreboard.to_csv(filename, index=False)
- return scoreboard
-
-def update_scoreboard(scoreboard, thumbs_up, thumbs_down):
- if thumbs_up:
- scoreboard.loc[0, 'Upvotes'] += 1
- elif thumbs_down:
- scoreboard.loc[0, 'Downvotes'] += 1
- return scoreboard
-
-def main():
- filename = 'output.csv'
- scoreboard = load_or_create_scoreboard(filename)
- st.title('Joke Parts Voting Game')
- thumbs_up = st.button('👍')
- thumbs_down = st.button('👎')
- scoreboard = update_scoreboard(scoreboard, thumbs_up, thumbs_down)
- scoreboard.to_csv(filename, index=False)
- col1, col2 = st.columns(2)
- with col1:
- st.write(f'👍 {scoreboard.loc[0, "Upvotes"]}')
- with col2:
- st.write(f'👎 {scoreboard.loc[0, "Downvotes"]}')
- original_text = generate_sentence()
- modified_text = replace_with_synonym(original_text)
- st.write(f'🤣 {modified_text}')
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/awacke1/SMART-FHIR-Assessment-BMI/app.py b/spaces/awacke1/SMART-FHIR-Assessment-BMI/app.py
deleted file mode 100644
index f4a872b7141677f8b59573aa144323a9fc6ffcd7..0000000000000000000000000000000000000000
--- a/spaces/awacke1/SMART-FHIR-Assessment-BMI/app.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import streamlit as st
-import pandas as pd
-from fhirclient import client
-from fhir.resources import (patient as fhirpatient,
- observation as fhirobservation)
-
-#import settings
-
-#pip install fhir.resources fhirclient
-
-#from fhirclient.models import (client as fhirclient,
-# bundle as fhirbundle,
-# patient as fhirpatient)
-
-settings = {
- 'app_id': 'my_app',
- 'api_base': 'https://hapi.fhir.org/baseR4',
- 'redirect_uri': 'http://localhost:8000/callback',
- 'scope': 'launch/patient openid fhirUser',
- 'client_secret': 'my_app_secret'
-}
-
-
-def bmi_calculator(height, weight):
- bmi = weight / ((height/100)**2)
- return bmi
-
-def get_patient_data(client):
- patient = fhirpatient.Patient.read('self', client.server)
-
- # Get the patient's weight and height observations
- weight_obs = client.server.request(fhirobservation.Observation
- .where({'subject': f'Patient/{patient.id}',
- 'code': '29463-7'}))
- height_obs = client.server.request(fhirobservation.Observation
- .where({'subject': f'Patient/{patient.id}',
- 'code': '8302-2'}))
-
- # Get the latest weight and height values
- weight = float(weight_obs.entry[-1].resource.valueQuantity.value)
- height = float(height_obs.entry[-1].resource.valueQuantity.value)
-
- return height, weight
-
-# smart = client.FHIRClient(settings=settings.settings)
-
-st.title("BMI Calculator")
-
-#if smart.ready:
-if (True==True):
- st.write("SMART on FHIR connection successful!")
- st.write("Loading patient data...")
- #height, weight = get_patient_data(smart)
- #st.write("Patient height:", height, "cm")
- #st.write("Patient weight:", weight, "kg")
- #st.write("Calculating BMI...")
- #bmi = bmi_calculator(height, weight)
- #st.write("Your BMI is:", round(bmi, 2))
-
- #if bmi < 18.5:
- # st.write("You are underweight.")
- #elif bmi >= 18.5 and bmi < 25:
- # st.write("You have a healthy weight.")
- #elif bmi >= 25 and bmi < 30:
- # st.write("You are overweight.")
- #else:
- # st.write("You are obese.")
-else:
- st.write("SMART on FHIR connection failed. Please check your app settings.")
-
-
-st.markdown("""
-There are several public SMART on FHIR test servers available that allow for public access:
-
-## HAPI FHIR Public Test Server:
-This is a free, public test server that allows users to test their FHIR apps.
-The server is available at the following
-URL:
-https://hapi.fhir.org/baseR4
-
-""")
\ No newline at end of file
diff --git a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/src/TweenThree.js b/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/src/TweenThree.js
deleted file mode 100644
index cd940843fef1e2f939b383ddd2de2abcfd32cbca..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/src/TweenThree.js
+++ /dev/null
@@ -1,24 +0,0 @@
-TWEEN.THREE = {};
-
-TWEEN.THREE.DirectRotation = function(object3d) {
- this._object3d = object3d;
-
-	// The original line was left incomplete ("object3d."); cloning the starting
-	// rotation is an assumed intent, not confirmed by the author.
-	this._originalRotation = object3d.rotation.clone();
-
- this._object = {progress: 0};
- this._object.position;
-};
-
-TWEEN.THREE.DirectRotation.prototype = new TWEEN.Tween();
-
-TWEEN.THREE.DirectRotation.prototype._onUpdateCallback = function(object) {
- this._object3d // TODO
-
- this._other_onUpdateCallback(object);
-};
-
-TWEEN.THREE.DirectRotation.prototype.onUpdate = function(onUpdateCallback) {
- this._other_onUpdateCallback = onUpdateCallback;
-
- return this;
-};
\ No newline at end of file
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/KMZLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/KMZLoader.js
deleted file mode 100644
index 22e171fc6d4d5aa148adcf1d7f6dca16297f2fde..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/KMZLoader.js
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- */
-
-THREE.KMZLoader = function ( manager ) {
-
- this.manager = ( manager !== undefined ) ? manager : THREE.DefaultLoadingManager;
-
-};
-
-THREE.KMZLoader.prototype = {
-
- constructor: THREE.KMZLoader,
-
- load: function ( url, onLoad, onProgress, onError ) {
-
- var scope = this;
-
- var loader = new THREE.FileLoader( scope.manager );
- loader.setPath( scope.path );
- loader.setResponseType( 'arraybuffer' );
- loader.load( url, function ( text ) {
-
- onLoad( scope.parse( text ) );
-
- }, onProgress, onError );
-
- },
-
- setPath: function ( value ) {
-
- this.path = value;
- return this;
-
- },
-
- parse: function ( data ) {
-
- function findFile( url ) {
-
- for ( var path in zip.files ) {
-
- if ( path.substr( - url.length ) === url ) {
-
- return zip.files[ path ];
-
- }
-
- }
-
- }
-
- var manager = new THREE.LoadingManager();
- manager.setURLModifier( function ( url ) {
-
- var image = findFile( url );
-
- if ( image ) {
-
- console.log( 'Loading', url );
-
- var blob = new Blob( [ image.asArrayBuffer() ], { type: 'application/octet-stream' } );
- return URL.createObjectURL( blob );
-
- }
-
- return url;
-
- } );
-
- //
-
- var zip = new JSZip( data ); // eslint-disable-line no-undef
-
- if ( zip.files[ 'doc.kml' ] ) {
-
- var xml = new DOMParser().parseFromString( zip.files[ 'doc.kml' ].asText(), 'application/xml' );
-
- var model = xml.querySelector( 'Placemark Model Link href' );
-
- if ( model ) {
-
- var loader = new THREE.ColladaLoader( manager );
- return loader.parse( zip.files[ model.textContent ].asText() );
-
- }
-
- } else {
-
- console.warn( 'KMZLoader: Missing doc.kml file.' );
-
- for ( var path in zip.files ) {
-
- var extension = path.split( '.' ).pop().toLowerCase();
-
- if ( extension === 'dae' ) {
-
- var loader = new THREE.ColladaLoader( manager );
- return loader.parse( zip.files[ path ].asText() );
-
- }
-
- }
-
- }
-
- console.error( 'KMZLoader: Couldn\'t find .dae file.' );
- return { scene: new THREE.Group() };
-
- }
-
-};
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/core/NodeFrame.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/core/NodeFrame.js
deleted file mode 100644
index c0e0433d51a5cd68a30e5c93de8961c0303bd73d..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/core/NodeFrame.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * @author sunag / http://www.sunag.com.br/
- */
-
-function NodeFrame( time ) {
-
- this.time = time !== undefined ? time : 0;
-
- this.id = 0;
-
-}
-
-NodeFrame.prototype = {
-
- constructor: NodeFrame,
-
- update: function ( delta ) {
-
- ++ this.id;
-
- this.time += delta;
- this.delta = delta;
-
- return this;
-
- },
-
- setRenderer: function ( renderer ) {
-
- this.renderer = renderer;
-
- return this;
-
- },
-
- setRenderTexture: function ( renderTexture ) {
-
- this.renderTexture = renderTexture;
-
- return this;
-
- },
-
- updateNode: function ( node ) {
-
- if ( node.frameId === this.id ) return this;
-
- node.updateFrame( this );
-
- node.frameId = this.id;
-
- return this;
-
- }
-
-};
-
-export { NodeFrame };
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/NormalMapShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/NormalMapShader.js
deleted file mode 100644
index 957e818f509f4f373ed0f646236b2ccd5eb04b8f..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/NormalMapShader.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * @author alteredq / http://alteredqualia.com/
- *
- * Normal map shader
- * - compute normals from heightmap
- */
-
-THREE.NormalMapShader = {
-
- uniforms: {
-
- "heightMap": { value: null },
- "resolution": { value: new THREE.Vector2( 512, 512 ) },
- "scale": { value: new THREE.Vector2( 1, 1 ) },
- "height": { value: 0.05 }
-
- },
-
- vertexShader: [
-
- "varying vec2 vUv;",
-
- "void main() {",
-
- "vUv = uv;",
- "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
-
- "}"
-
- ].join( "\n" ),
-
- fragmentShader: [
-
- "uniform float height;",
- "uniform vec2 resolution;",
- "uniform sampler2D heightMap;",
-
- "varying vec2 vUv;",
-
- "void main() {",
-
- "float val = texture2D( heightMap, vUv ).x;",
-
- "float valU = texture2D( heightMap, vUv + vec2( 1.0 / resolution.x, 0.0 ) ).x;",
- "float valV = texture2D( heightMap, vUv + vec2( 0.0, 1.0 / resolution.y ) ).x;",
-
- "gl_FragColor = vec4( ( 0.5 * normalize( vec3( val - valU, val - valV, height ) ) + 0.5 ), 1.0 );",
-
- "}"
-
- ].join( "\n" )
-
-};
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_gfpgan_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_gfpgan_arch.py
deleted file mode 100644
index cef14a435aa824a1b7c4baaf2d1fe0a2f6cc4441..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_gfpgan_arch.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import torch
-
-from gfpgan.archs.gfpganv1_arch import FacialComponentDiscriminator, GFPGANv1, StyleGAN2GeneratorSFT
-from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean, StyleGAN2GeneratorCSFT
-
-
-def test_stylegan2generatorsft():
- """Test arch: StyleGAN2GeneratorSFT."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = StyleGAN2GeneratorSFT(
- out_size=32,
- num_style_feat=512,
- num_mlp=8,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- lr_mlp=0.01,
- narrow=1,
- sft_half=False).cuda().eval()
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
- output = net([style], conditions)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with return_latents ----------------------- #
- output = net([style], conditions, return_latents=True)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 1
- # check latent
- assert output[1][0].shape == (8, 512)
-
- # -------------------- with randomize_noise = False ----------------------- #
- output = net([style], conditions, randomize_noise=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with truncation = 0.5 and mixing----------------------- #
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
-
-def test_gfpganv1():
- """Test arch: GFPGANv1."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = GFPGANv1(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- lr_mlp=0.01,
- input_is_latent=False,
- different_w=False,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
- # -------------------- with different_w = True ----------------------- #
- net = GFPGANv1(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- resample_kernel=(1, 3, 3, 1),
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- lr_mlp=0.01,
- input_is_latent=False,
- different_w=True,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
-
-def test_facialcomponentdiscriminator():
- """Test arch: FacialComponentDiscriminator."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = FacialComponentDiscriminator().cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert len(output) == 2
- assert output[0].shape == (1, 1, 8, 8)
- assert output[1] is None
-
- # -------------------- return intermediate features ----------------------- #
- output = net(img, return_feats=True)
- assert len(output) == 2
- assert output[0].shape == (1, 1, 8, 8)
- assert len(output[1]) == 2
- assert output[1][0].shape == (1, 128, 16, 16)
- assert output[1][1].shape == (1, 256, 8, 8)
-
-
-def test_stylegan2generatorcsft():
- """Test arch: StyleGAN2GeneratorCSFT."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = StyleGAN2GeneratorCSFT(
- out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=1, sft_half=False).cuda().eval()
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
- condition1 = torch.rand((1, 512, 8, 8), dtype=torch.float32).cuda()
- condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
- condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
- conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
- output = net([style], conditions)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with return_latents ----------------------- #
- output = net([style], conditions, return_latents=True)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 1
- # check latent
- assert output[1][0].shape == (8, 512)
-
- # -------------------- with randomize_noise = False ----------------------- #
- output = net([style], conditions, randomize_noise=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with truncation = 0.5 and mixing----------------------- #
- output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
-
-def test_gfpganv1clean():
- """Test arch: GFPGANv1Clean."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = GFPGANv1Clean(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- input_is_latent=False,
- different_w=False,
- narrow=1,
- sft_half=True).cuda().eval()
-
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
-
- # -------------------- with different_w = True ----------------------- #
- net = GFPGANv1Clean(
- out_size=32,
- num_style_feat=512,
- channel_multiplier=1,
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- input_is_latent=False,
- different_w=True,
- narrow=1,
- sft_half=True).cuda().eval()
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
- output = net(img)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 3
- # check out_rgbs for intermediate loss
- assert output[1][0].shape == (1, 3, 8, 8)
- assert output[1][1].shape == (1, 3, 16, 16)
- assert output[1][2].shape == (1, 3, 32, 32)
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_stylegan2_clean_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_stylegan2_clean_arch.py
deleted file mode 100644
index 78bb920e73ce28cfec9ea89a4339cc5b87981b47..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_stylegan2_clean_arch.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import torch
-
-from gfpgan.archs.stylegan2_clean_arch import StyleGAN2GeneratorClean
-
-
-def test_stylegan2generatorclean():
- """Test arch: StyleGAN2GeneratorClean."""
-
- # model init and forward (gpu)
- if torch.cuda.is_available():
- net = StyleGAN2GeneratorClean(
- out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=0.5).cuda().eval()
- style = torch.rand((1, 512), dtype=torch.float32).cuda()
- output = net([style], input_is_latent=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with return_latents ----------------------- #
- output = net([style], input_is_latent=True, return_latents=True)
- assert output[0].shape == (1, 3, 32, 32)
- assert len(output[1]) == 1
- # check latent
- assert output[1][0].shape == (8, 512)
-
- # -------------------- with randomize_noise = False ----------------------- #
- output = net([style], randomize_noise=False)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # -------------------- with truncation = 0.5 and mixing----------------------- #
- output = net([style, style], truncation=0.5, truncation_latent=style)
- assert output[0].shape == (1, 3, 32, 32)
- assert output[1] is None
-
- # ------------------ test make_noise ----------------------- #
- out = net.make_noise()
- assert len(out) == 7
- assert out[0].shape == (1, 1, 4, 4)
- assert out[1].shape == (1, 1, 8, 8)
- assert out[2].shape == (1, 1, 8, 8)
- assert out[3].shape == (1, 1, 16, 16)
- assert out[4].shape == (1, 1, 16, 16)
- assert out[5].shape == (1, 1, 32, 32)
- assert out[6].shape == (1, 1, 32, 32)
-
- # ------------------ test get_latent ----------------------- #
- out = net.get_latent(style)
- assert out.shape == (1, 512)
-
- # ------------------ test mean_latent ----------------------- #
- out = net.mean_latent(2)
- assert out.shape == (1, 512)
diff --git a/spaces/breezedeus/CnOCR-Demo/app.py b/spaces/breezedeus/CnOCR-Demo/app.py
deleted file mode 100644
index 7d002ef41262d9013088cde8b8ee90b21849037e..0000000000000000000000000000000000000000
--- a/spaces/breezedeus/CnOCR-Demo/app.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# coding: utf-8
-# Copyright (C) 2023, [Breezedeus](https://github.com/breezedeus).
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# Ref: https://huggingface.co/spaces/hysts/Manga-OCR/blob/main/app.py
-
-import os
-import json
-import functools
-
-import gradio as gr
-import cv2
-import numpy as np
-from cnstd.utils import pil_to_numpy, imsave
-
-from cnocr import CnOcr, DET_AVAILABLE_MODELS, REC_AVAILABLE_MODELS
-from cnocr.utils import set_logger, draw_ocr_results, download
-
-
-logger = set_logger()
-MODELS = {}
-
-
-def plot_for_debugging(rotated_img, one_out, box_score_thresh, crop_ncols, prefix_fp):
- import matplotlib.pyplot as plt
- import math
-
- rotated_img = rotated_img.copy()
- crops = [info['cropped_img'] for info in one_out]
- print('%d boxes are found' % len(crops))
- if len(crops) < 1:
- return
- ncols = crop_ncols
- nrows = math.ceil(len(crops) / ncols)
- fig, ax = plt.subplots(nrows=nrows, ncols=ncols)
- for i, axi in enumerate(ax.flat):
- if i >= len(crops):
- break
- axi.imshow(crops[i])
- crop_fp = '%s-crops.png' % prefix_fp
- plt.savefig(crop_fp)
- print('cropped results are save to file %s' % crop_fp)
-
- for info in one_out:
- box, score = info.get('position'), info['score']
- if score < box_score_thresh: # score < 0.5
- continue
- if box is not None:
- box = box.astype(int).reshape(-1, 2)
- cv2.polylines(rotated_img, [box], True, color=(255, 0, 0), thickness=2)
- result_fp = '%s-result.png' % prefix_fp
- imsave(rotated_img, result_fp, normalized=False)
- print('boxes results are save to file %s' % result_fp)
-
-
-def get_ocr_model(det_model_name, rec_model_name, det_more_configs):
- global MODELS
- config_str = json.dumps(det_more_configs)
- if (det_model_name, rec_model_name, config_str) in MODELS:
- return MODELS[(det_model_name, rec_model_name, config_str)]
-
- det_model_name, det_model_backend = det_model_name.split('::')
- # rec_model_name, rec_model_backend = rec_model_name.split('::')
- rec_model_backend = 'onnx'
- model = CnOcr(
- det_model_name=det_model_name,
- det_model_backend=det_model_backend,
- rec_model_name=rec_model_name,
- rec_model_backend=rec_model_backend,
- det_more_configs=det_more_configs,
- )
- if len(MODELS) > 50:
- MODELS = {}
- MODELS[(det_model_name, rec_model_name, config_str)] = model
- return model
-
-
-def visualize_naive_result(img, det_model_name, std_out, box_score_thresh):
- if len(std_out) < 1:
- # gr.Warning(f'未检测到文本!')
- return []
- img = pil_to_numpy(img).transpose((1, 2, 0)).astype(np.uint8)
-
- # plot_for_debugging(img, std_out, box_score_thresh, 2, './streamlit-app')
- # gr.Markdown('## Detection Result')
- # if det_model_name == 'naive_det':
- # gr.Warning('⚠️ Warning: "naive_det" 检测模型不返回文本框位置!')
- # cols = st.columns([1, 7, 1])
- # cols[1].image('./streamlit-app-result.png')
- #
- # st.subheader('Recognition Result')
- # cols = st.columns([1, 7, 1])
- # cols[1].image('./streamlit-app-crops.png')
-
- return _visualize_ocr(std_out)
-
-
-def _visualize_ocr(ocr_outs):
- if len(ocr_outs) < 1:
- return
- ocr_res = []
- for out in ocr_outs:
- # cropped_img = out['cropped_img'] # 检测出的文本框
- ocr_res.append([out['score'], out['text']])
- return ocr_res
-
-
-def visualize_result(img, ocr_outs):
- out_draw_fp = './streamlit-app-det-result.png'
- font_path = 'docs/fonts/simfang.ttf'
- if not os.path.exists(font_path):
- url = 'https://huggingface.co/datasets/breezedeus/cnocr-wx-qr-code/resolve/main/fonts/simfang.ttf'
- os.makedirs(os.path.dirname(font_path), exist_ok=True)
- download(url, path=font_path, overwrite=True)
- draw_ocr_results(img, ocr_outs, out_draw_fp, font_path)
- return out_draw_fp
-
-
-def recognize(
- det_model_name,
- is_single_line,
- rec_model_name,
- rotated_bbox,
- use_angle_clf,
- new_size,
- box_score_thresh,
- min_box_size,
- image_file,
-):
- img = image_file.convert('RGB')
- det_more_configs = dict(rotated_bbox=rotated_bbox, use_angle_clf=use_angle_clf)
- ocr = get_ocr_model(det_model_name, rec_model_name, det_more_configs)
-
- if is_single_line:
- ocr_out = [ocr.ocr_for_single_line(np.array(img))]
- else:
- ocr_out = ocr.ocr(
- img,
- return_cropped_image=True,
- resized_shape=new_size,
- preserve_aspect_ratio=True,
- box_score_thresh=box_score_thresh,
- min_box_size=min_box_size,
- )
-
- det_model_name, det_model_backend = det_model_name.split('::')
- if is_single_line or det_model_name == 'naive_det':
- out_texts = visualize_naive_result(
- img, det_model_name, ocr_out, box_score_thresh
- )
- if is_single_line:
- return [
- gr.update(visible=False),
- gr.update(visible=False),
- gr.update(value=out_texts, visible=True),
- ]
- return [
- gr.update(visible=False),
- gr.update(visible=True),
- gr.update(value=out_texts, visible=True),
- ]
- else:
- out_img_path = visualize_result(img, ocr_out)
- return [
- gr.update(value=out_img_path, visible=True),
- gr.update(visible=False),
- gr.update(visible=False),
- ]
-
-
-def main():
- det_models = list(DET_AVAILABLE_MODELS.all_models())
- det_models.append(('naive_det', 'onnx'))
- det_models.sort()
- det_models = [f'{m}::{b}' for m, b in det_models]
-
- all_models = list(REC_AVAILABLE_MODELS.all_models())
- all_models.sort()
- all_models = [f'{m}' for m, b in all_models if b == 'onnx']
-
- title = '开源Python OCR工具:'
-    desc = (
-        ''  # NOTE: the original HTML description string (links/badges) was lost in extraction
-    )
- gr.Markdown(desc)
- with gr.Row(equal_height=False):
- with gr.Column(min_width=200, variant='panel', scale=1):
- gr.Markdown('### 模型设置')
- det_model_name = gr.Dropdown(
- label='选择检测模型', choices=det_models, value='ch_PP-OCRv3_det::onnx',
- )
- is_single_line = gr.Checkbox(label='单行文字模式(不使用检测模型)', value=False)
-
- rec_model_name = gr.Dropdown(
- label='选择识别模型',
- choices=all_models,
- value='densenet_lite_136-fc',
- )
-
- gr.Markdown('### 检测参数')
- rotated_bbox = gr.Checkbox(label='检测带角度文本框', value=True)
- use_angle_clf = gr.Checkbox(label='使用角度预测模型校正文本框', value=False)
- new_size = gr.Slider(
- label='resize 后图片(长边)大小', minimum=124, maximum=4096, value=768
- )
- box_score_thresh = gr.Slider(
- label='得分阈值(低于阈值的结果会被过滤掉)', minimum=0.05, maximum=0.95, value=0.3
- )
- min_box_size = gr.Slider(
- label='框大小阈值(更小的文本框会被过滤掉)', minimum=4, maximum=50, value=10
- )
-
- with gr.Column(scale=3, variant='compact'):
- gr.Markdown('### 选择待识别图片')
- image_file = gr.Image(label='待识别图片', type="pil", image_mode='RGB')
- sub_btn = gr.Button("Submit", variant="primary")
- out_image = gr.Image(label='识别结果', interactive=False, visible=False)
- naive_warn = gr.Markdown(
- '**⚠️ Warning**: "naive_det" 检测模型不返回文本框位置!', visible=False
- )
- out_texts = gr.Dataframe(
- headers=['得分', '文本'], label='识别结果', interactive=False, visible=False
- )
- sub_btn.click(
- recognize,
- inputs=[
- det_model_name,
- is_single_line,
- rec_model_name,
- rotated_bbox,
- use_angle_clf,
- new_size,
- box_score_thresh,
- min_box_size,
- image_file,
- ],
- outputs=[out_image, naive_warn, out_texts],
- )
-
- gr.Examples(
- label='示例',
- examples=examples,
- inputs=[
- det_model_name,
- is_single_line,
- rec_model_name,
- use_angle_clf,
- image_file,
- ],
- outputs=[out_image, naive_warn, out_texts],
- fn=example_func,
- cache_examples=os.getenv('CACHE_EXAMPLES') == '1',
- )
-
- demo.queue(concurrency_count=4)
- demo.launch()
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h
deleted file mode 100644
index db246e49a026b7cd989b305f4d3d98100be3c912..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-#pragma once
-
-// NOTE: the include targets below were stripped during extraction; these headers are
-// reconstructed from context (pybind11 bindings, std::array/std::vector, uint64_t).
-#include <pybind11/numpy.h>
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <pybind11/stl_bind.h>
-#include <array>
-#include <cstdint>
-#include <vector>
-
-namespace py = pybind11;
-
-namespace detectron2 {
-
-namespace COCOeval {
-
-// Annotation data for a single object instance in an image
-struct InstanceAnnotation {
- InstanceAnnotation(
- uint64_t id,
- double score,
- double area,
- bool is_crowd,
- bool ignore)
- : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}
- uint64_t id;
- double score = 0.;
- double area = 0.;
- bool is_crowd = false;
- bool ignore = false;
-};
-
-// Stores intermediate results for evaluating detection results for a single
-// image that has D detected instances and G ground truth instances. This stores
-// matches between detected and ground truth instances
-struct ImageEvaluation {
- // For each of the D detected instances, the id of the matched ground truth
- // instance, or 0 if unmatched
-  std::vector<uint64_t> detection_matches;
-
- // The detection score of each of the D detected instances
-  std::vector<double> detection_scores;
-
- // Marks whether or not each of G instances was ignored from evaluation (e.g.,
- // because it's outside area_range)
-  std::vector<bool> ground_truth_ignores;
-
- // Marks whether or not each of D instances was ignored from evaluation (e.g.,
- // because it's outside aRng)
-  std::vector<bool> detection_ignores;
-};
-
-template <class T>
-using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;
-
-// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each
-// combination of image, category, area range settings, and IOU thresholds to
-// evaluate, it matches detected instances to ground truth instances and stores
-// the results into a vector of ImageEvaluation results, which will be
-// interpreted by the COCOeval::Accumulate() function to produce precision-recall
-// curves. The parameters of nested vectors have the following semantics:
-// image_category_ious[i][c][d][g] is the intersection over union of the d'th
-// detected instance and g'th ground truth instance of
-// category category_ids[c] in image image_ids[i]
-// image_category_ground_truth_instances[i][c] is a vector of ground truth
-// instances in image image_ids[i] of category category_ids[c]
-// image_category_detection_instances[i][c] is a vector of detected
-// instances in image image_ids[i] of category category_ids[c]
-std::vector<ImageEvaluation> EvaluateImages(
-    const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples
-    int max_detections,
-    const std::vector<double>& iou_thresholds,
-    const ImageCategoryInstances<std::vector<double>>& image_category_ious,
-    const ImageCategoryInstances<InstanceAnnotation>&
-        image_category_ground_truth_instances,
-    const ImageCategoryInstances<InstanceAnnotation>&
-        image_category_detection_instances);
-
-// C++ implementation of COCOeval.accumulate(), which generates precision
-// recall curves for each set of category, IOU threshold, detection area range,
-// and max number of detections parameters. It is assumed that the parameter
-// evaluations is the return value of the function COCOeval::EvaluateImages(),
-// which was called with the same parameter settings params
-py::dict Accumulate(
- const py::object& params,
-    const std::vector<ImageEvaluation>& evaluations);
-
-} // namespace COCOeval
-} // namespace detectron2
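
The header's comments spell out the nested layout that `EvaluateImages` expects: `image_category_ious[i][c]` is a D×G IoU matrix for image `i` and category `c`, paired with per-image, per-category lists of ground-truth and detected instances. The sketch below only assembles that nested structure in Python; the IoU values and instance placeholders are made up, and no binding call is shown (the pybind11 entry point is not part of this header's declarations).

```python
# Sketch of the nested inputs described in the header comments:
# image_category_ious[i][c][d][g] = IoU of detection d and ground truth g
# for category category_ids[c] in image image_ids[i].
import numpy as np

image_ids = [0, 1]
category_ids = [1]

def iou_matrix(dets, gts):
    """Dummy D x G IoU matrix (a real pipeline would compute box IoUs here)."""
    return np.random.rand(len(dets), len(gts)).tolist()

detections = {0: {1: ['det_a', 'det_b']}, 1: {1: ['det_c']}}
ground_truths = {0: {1: ['gt_a']}, 1: {1: ['gt_b', 'gt_c']}}

image_category_ious = [
    [iou_matrix(detections[i][c], ground_truths[i][c]) for c in category_ids]
    for i in image_ids
]
# image_category_ious[i][c] is now a D x G nested list, matching the
# ImageCategoryInstances<std::vector<double>> argument of EvaluateImages().
print(len(image_category_ious), len(image_category_ious[0]))
```
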
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py
deleted file mode 100644
index 4d499d8f20d811fb8197d7bdae358540bb5b0dfc..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from typing import Any, Dict, List, Tuple
-import torch
-from torch.nn import functional as F
-
-from detectron2.structures import BoxMode, Instances
-
-from densepose.converters import ToChartResultConverter
-from densepose.converters.base import IntTupleBox, make_int_box
-from densepose.structures import DensePoseDataRelative, DensePoseList
-
-
-class DensePoseBaseSampler:
- """
- Base DensePose sampler to produce DensePose data from DensePose predictions.
- Samples for each class are drawn according to some distribution over all pixels estimated
- to belong to that class.
- """
-
- def __init__(self, count_per_class: int = 8):
- """
- Constructor
-
- Args:
- count_per_class (int): the sampler produces at most `count_per_class`
- samples for each category
- """
- self.count_per_class = count_per_class
-
- def __call__(self, instances: Instances) -> DensePoseList:
- """
- Convert DensePose predictions (an instance of `DensePoseChartPredictorOutput`)
- into DensePose annotations data (an instance of `DensePoseList`)
- """
- boxes_xyxy_abs = instances.pred_boxes.tensor.clone().cpu()
- boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
- dp_datas = []
- for i in range(len(boxes_xywh_abs)):
- annotation_i = self._sample(instances[i], make_int_box(boxes_xywh_abs[i]))
- annotation_i[DensePoseDataRelative.S_KEY] = self._resample_mask( # pyre-ignore[6]
- instances[i].pred_densepose
- )
- dp_datas.append(DensePoseDataRelative(annotation_i))
- # create densepose annotations on CPU
- dp_list = DensePoseList(dp_datas, boxes_xyxy_abs, instances.image_size)
- return dp_list
-
- def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
- """
-        Sample DensePoseDataRelative from estimation results
- """
- labels, dp_result = self._produce_labels_and_results(instance)
- annotation = {
- DensePoseDataRelative.X_KEY: [],
- DensePoseDataRelative.Y_KEY: [],
- DensePoseDataRelative.U_KEY: [],
- DensePoseDataRelative.V_KEY: [],
- DensePoseDataRelative.I_KEY: [],
- }
- n, h, w = dp_result.shape
- for part_id in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
- # indices - tuple of 3 1D tensors of size k
- # 0: index along the first dimension N
- # 1: index along H dimension
- # 2: index along W dimension
- indices = torch.nonzero(labels.expand(n, h, w) == part_id, as_tuple=True)
- # values - an array of size [n, k]
- # n: number of channels (U, V, confidences)
- # k: number of points labeled with part_id
- values = dp_result[indices].view(n, -1)
- k = values.shape[1]
- count = min(self.count_per_class, k)
- if count <= 0:
- continue
- index_sample = self._produce_index_sample(values, count)
- sampled_values = values[:, index_sample]
- sampled_y = indices[1][index_sample] + 0.5
- sampled_x = indices[2][index_sample] + 0.5
- # prepare / normalize data
- x = (sampled_x / w * 256.0).cpu().tolist()
- y = (sampled_y / h * 256.0).cpu().tolist()
- u = sampled_values[0].clamp(0, 1).cpu().tolist()
- v = sampled_values[1].clamp(0, 1).cpu().tolist()
- fine_segm_labels = [part_id] * count
- # extend annotations
- annotation[DensePoseDataRelative.X_KEY].extend(x)
- annotation[DensePoseDataRelative.Y_KEY].extend(y)
- annotation[DensePoseDataRelative.U_KEY].extend(u)
- annotation[DensePoseDataRelative.V_KEY].extend(v)
- annotation[DensePoseDataRelative.I_KEY].extend(fine_segm_labels)
- return annotation
-
- def _produce_index_sample(self, values: torch.Tensor, count: int):
- """
- Abstract method to produce a sample of indices to select data
- To be implemented in descendants
-
- Args:
- values (torch.Tensor): an array of size [n, k] that contains
- estimated values (U, V, confidences);
- n: number of channels (U, V, confidences)
- k: number of points labeled with part_id
- count (int): number of samples to produce, should be positive and <= k
-
- Return:
- list(int): indices of values (along axis 1) selected as a sample
- """
- raise NotImplementedError
-
- def _produce_labels_and_results(self, instance: Instances) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Method to get labels and DensePose results from an instance
-
- Args:
- instance (Instances): an instance of `DensePoseChartPredictorOutput`
-
- Return:
- labels (torch.Tensor): shape [H, W], DensePose segmentation labels
- dp_result (torch.Tensor): shape [2, H, W], stacked DensePose results u and v
- """
- converter = ToChartResultConverter
- chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
- labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
- return labels, dp_result
-
- def _resample_mask(self, output: Any) -> torch.Tensor:
- """
- Convert DensePose predictor output to segmentation annotation - tensors of size
- (256, 256) and type `int64`.
-
- Args:
- output: DensePose predictor output with the following attributes:
- - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
- segmentation scores
- - fine_segm: tensor of size [N, C, H, W] with unnormalized fine
- segmentation scores
- Return:
- Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
- where S = DensePoseDataRelative.MASK_SIZE
- """
- sz = DensePoseDataRelative.MASK_SIZE
- S = (
- F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
- .argmax(dim=1)
- .long()
- )
- I = (
- (
- F.interpolate(
- output.fine_segm,
- (sz, sz),
- mode="bilinear",
- align_corners=False,
- ).argmax(dim=1)
- * (S > 0).long()
- )
- .squeeze()
- .cpu()
- )
- # Map fine segmentation results to coarse segmentation ground truth
- # TODO: extract this into separate classes
- # coarse segmentation: 1 = Torso, 2 = Right Hand, 3 = Left Hand,
- # 4 = Left Foot, 5 = Right Foot, 6 = Upper Leg Right, 7 = Upper Leg Left,
- # 8 = Lower Leg Right, 9 = Lower Leg Left, 10 = Upper Arm Left,
- # 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
- # 14 = Head
- # fine segmentation: 1, 2 = Torso, 3 = Right Hand, 4 = Left Hand,
- # 5 = Left Foot, 6 = Right Foot, 7, 9 = Upper Leg Right,
- # 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right,
- # 12, 14 = Lower Leg Left, 15, 17 = Upper Arm Left,
- # 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left,
- # 20, 22 = Lower Arm Right, 23, 24 = Head
- FINE_TO_COARSE_SEGMENTATION = {
- 1: 1,
- 2: 1,
- 3: 2,
- 4: 3,
- 5: 4,
- 6: 5,
- 7: 6,
- 8: 7,
- 9: 6,
- 10: 7,
- 11: 8,
- 12: 9,
- 13: 8,
- 14: 9,
- 15: 10,
- 16: 11,
- 17: 10,
- 18: 11,
- 19: 12,
- 20: 13,
- 21: 12,
- 22: 13,
- 23: 14,
- 24: 14,
- }
- mask = torch.zeros((sz, sz), dtype=torch.int64, device=torch.device("cpu"))
- for i in range(DensePoseDataRelative.N_PART_LABELS):
- mask[I == i + 1] = FINE_TO_COARSE_SEGMENTATION[i + 1]
- return mask
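
`_resample_mask` collapses the 24 fine DensePose part labels into the 14 coarse body-part labels using the `FINE_TO_COARSE_SEGMENTATION` table above. Here is that remapping step in isolation, applied to a dummy label map instead of real predictor output.

```python
# Standalone sketch of the fine-to-coarse label remapping used in _resample_mask,
# applied to a dummy 4x4 label map instead of real DensePose predictor output.
import torch

FINE_TO_COARSE_SEGMENTATION = {
    1: 1, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 6, 10: 7, 11: 8, 12: 9,
    13: 8, 14: 9, 15: 10, 16: 11, 17: 10, 18: 11, 19: 12, 20: 13, 21: 12,
    22: 13, 23: 14, 24: 14,
}

fine_labels = torch.randint(0, 25, (4, 4))  # 0 = background, 1..24 = fine parts
coarse = torch.zeros_like(fine_labels)
for fine_id, coarse_id in FINE_TO_COARSE_SEGMENTATION.items():
    coarse[fine_labels == fine_id] = coarse_id

print(fine_labels)
print(coarse)
```
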
diff --git a/spaces/bzd4576/sovits-sin/data_utils.py b/spaces/bzd4576/sovits-sin/data_utils.py
deleted file mode 100644
index 8067f2fe66b9986db6daa2dcce1bcd31247dfc53..0000000000000000000000000000000000000000
--- a/spaces/bzd4576/sovits-sin/data_utils.py
+++ /dev/null
@@ -1,418 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-import commons
-from mel_processing import spectrogram_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import text_to_sequence, cleaned_text_to_sequence
-
-
-def dropout1d(myarray, ratio=0.5):
- indices = np.random.choice(np.arange(myarray.size), replace=False,
- size=int(myarray.size * ratio))
- myarray[indices] = 0
- return myarray
-
-
-class TextAudioLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_and_text, hparams):
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
- self.text_cleaners = hparams.text_cleaners
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 190)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_and_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_and_text_new = []
- lengths = []
- for audiopath, text in self.audiopaths_and_text:
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
- audiopaths_and_text_new.append([audiopath, text])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- self.audiopaths_and_text = audiopaths_and_text_new
- self.lengths = lengths
-
- def get_audio_text_pair(self, audiopath_and_text):
- # separate filename and text
- audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
- text = self.get_text(text)
- spec, wav = self.get_audio(audiopath)
- return (text, spec, wav)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text):
- # if self.cleaned_text:
- # text_norm = text
- # else:
- # text_norm = text_to_sequence(text, self.text_cleaners)
- # if self.add_blank:
- # text_norm = commons.intersperse(text_norm, 0)
- # text_norm = torch.LongTensor(text_norm)
-
- soft = np.load(text)
-
-        # # Add F0 (pitch) information
- # head, rear = text.split(".")
- # f0 = np.load(head+".f0."+rear)
- # soft[:,0] = f0/10
-
- text_norm = torch.FloatTensor(soft)
- return text_norm
-
- def __getitem__(self, index):
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
-
- def __len__(self):
- return len(self.audiopaths_and_text)
-
-
-class TextAudioCollate():
- """ Zero-pads model inputs and targets
- """
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
-        """Collates training batch from normalized text and audio
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
-
- text_padded = torch.FloatTensor(len(batch), max_text_len, 256)
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0), :] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- if self.return_ids:
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
-
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, speaker_id, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_sid_text, hparams):
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
- self.text_cleaners = hparams.text_cleaners
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 190)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_sid_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_sid_text_new = []
- lengths = []
- for audiopath, sid, text in self.audiopaths_sid_text:
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
- audiopaths_sid_text_new.append([audiopath, sid, text])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- self.audiopaths_sid_text = audiopaths_sid_text_new
- self.lengths = lengths
-
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
- # separate filename, speaker_id and text
- audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
- text = self.get_text(text)
- spec, wav = self.get_audio(audiopath)
- sid = self.get_sid(sid)
- return (text, spec, wav, sid)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text):
- soft = np.load(text)
- head, rear = text.split(".")
- f0 = np.load(head + ".f0." + rear)
- p = random.random()
- # print(p)
- if p < 0.3:
- f0 = dropout1d(f0, 0.6)
- # print(f0)
- soft[:, 0] = f0 / 10
- # soft = soft + np.expand_dims(np.log(f0),1)*0.2
- text_norm = torch.FloatTensor(soft)
- return text_norm
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def __getitem__(self, index):
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
- def __len__(self):
- return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
- """ Zero-pads model inputs and targets
- """
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
-        """Collates training batch from normalized text, audio and speaker identities
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
- sid = torch.LongTensor(len(batch))
-
- text_padded = torch.FloatTensor(len(batch), max_text_len, 256)
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- sid[i] = row[3]
-
- if self.return_ids:
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples which are not included in the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
- """
-
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- for i in range(len(buckets) - 1, 0, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
- # subsample
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
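
`DistributedBucketSampler` groups utterances by spectrogram length so that every batch stays inside one `(boundary_i, boundary_{i+1}]` bucket. A rough usage sketch follows, assuming a toy dataset that only exposes the `lengths` attribute the sampler reads; the boundaries, batch size, and world size are arbitrary example values.

```python
# Rough usage sketch of DistributedBucketSampler with a toy dataset.
# Only the `lengths` attribute is required by the sampler; all hyperparameter
# values below are made up for illustration.
import torch
from torch.utils.data import Dataset


class ToyDataset(Dataset):
    def __init__(self, lengths):
        self.lengths = lengths          # spectrogram lengths, as in TextAudioLoader

    def __len__(self):
        return len(self.lengths)

    def __getitem__(self, idx):
        return torch.zeros(self.lengths[idx])


dataset = ToyDataset(lengths=[90, 150, 210, 310, 420, 480, 520, 610])
sampler = DistributedBucketSampler(
    dataset, batch_size=2, boundaries=[32, 300, 500, 700],
    num_replicas=1, rank=0, shuffle=True,
)
sampler.set_epoch(0)
for batch_indices in sampler:
    print(batch_indices)   # each batch only contains indices from one length bucket
```
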
diff --git a/spaces/bzd4576/sovits-sin/utils.py b/spaces/bzd4576/sovits-sin/utils.py
deleted file mode 100644
index c60894b52072a9293eb797b21e79f74e7d60dbb6..0000000000000000000000000000000000000000
--- a/spaces/bzd4576/sovits-sin/utils.py
+++ /dev/null
@@ -1,261 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- # print(1111)
- saved_state_dict = checkpoint_dict['model']
- # print(1111)
-
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
-    hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
-    hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
-            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
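
`HParams` turns a nested config dict into attribute-style access, which is how `get_hparams_from_file` exposes the JSON config to the rest of the code. A tiny usage sketch with made-up config values:

```python
# Tiny usage sketch of HParams: nested dicts become nested attribute access.
# The config values here are made up for illustration.
config = {
    "train": {"batch_size": 16, "learning_rate": 2e-4},
    "data": {"sampling_rate": 22050, "hop_length": 256},
}
hps = HParams(**config)

print(hps.train.batch_size)      # 16
print(hps.data.sampling_rate)    # 22050
print("train" in hps)            # True
print(list(hps.keys()))          # ['train', 'data']
```
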
diff --git a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/melgan.py b/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/melgan.py
deleted file mode 100644
index 78bb36d153926e5f3c93b0a8aca470e6def77434..0000000000000000000000000000000000000000
--- a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/melgan.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2020 Tomoki Hayashi
-# MIT License (https://opensource.org/licenses/MIT)
-
-"""MelGAN Modules."""
-
-import logging
-
-import numpy as np
-import torch
-
-from sovits.vdecoder.parallel_wavegan.layers import CausalConv1d
-from sovits.vdecoder.parallel_wavegan.layers import CausalConvTranspose1d
-from sovits.vdecoder.parallel_wavegan.layers import ResidualStack
-
-
-class MelGANGenerator(torch.nn.Module):
- """MelGAN generator module."""
-
- def __init__(self,
- in_channels=80,
- out_channels=1,
- kernel_size=7,
- channels=512,
- bias=True,
- upsample_scales=[8, 8, 2, 2],
- stack_kernel_size=3,
- stacks=3,
- nonlinear_activation="LeakyReLU",
- nonlinear_activation_params={"negative_slope": 0.2},
- pad="ReflectionPad1d",
- pad_params={},
- use_final_nonlinear_activation=True,
- use_weight_norm=True,
- use_causal_conv=False,
- ):
- """Initialize MelGANGenerator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- kernel_size (int): Kernel size of initial and final conv layer.
- channels (int): Initial number of channels for conv layer.
- bias (bool): Whether to add bias parameter in convolution layers.
- upsample_scales (list): List of upsampling scales.
- stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
- stacks (int): Number of stacks in a single residual stack.
- nonlinear_activation (str): Activation function module name.
- nonlinear_activation_params (dict): Hyperparameters for activation function.
- pad (str): Padding function module name before dilated convolution layer.
- pad_params (dict): Hyperparameters for padding function.
-            use_final_nonlinear_activation (bool): Whether to apply a final Tanh activation layer.
- use_weight_norm (bool): Whether to use weight norm.
- If set to true, it will be applied to all of the conv layers.
- use_causal_conv (bool): Whether to use causal convolution.
-
- """
- super(MelGANGenerator, self).__init__()
-
-        # check hyper parameters are valid
- assert channels >= np.prod(upsample_scales)
- assert channels % (2 ** len(upsample_scales)) == 0
- if not use_causal_conv:
- assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
-
- # add initial layer
- layers = []
- if not use_causal_conv:
- layers += [
- getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
- torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
- ]
- else:
- layers += [
- CausalConv1d(in_channels, channels, kernel_size,
- bias=bias, pad=pad, pad_params=pad_params),
- ]
-
- for i, upsample_scale in enumerate(upsample_scales):
- # add upsampling layer
- layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
- if not use_causal_conv:
- layers += [
- torch.nn.ConvTranspose1d(
- channels // (2 ** i),
- channels // (2 ** (i + 1)),
- upsample_scale * 2,
- stride=upsample_scale,
- padding=upsample_scale // 2 + upsample_scale % 2,
- output_padding=upsample_scale % 2,
- bias=bias,
- )
- ]
- else:
- layers += [
- CausalConvTranspose1d(
- channels // (2 ** i),
- channels // (2 ** (i + 1)),
- upsample_scale * 2,
- stride=upsample_scale,
- bias=bias,
- )
- ]
-
- # add residual stack
- for j in range(stacks):
- layers += [
- ResidualStack(
- kernel_size=stack_kernel_size,
- channels=channels // (2 ** (i + 1)),
- dilation=stack_kernel_size ** j,
- bias=bias,
- nonlinear_activation=nonlinear_activation,
- nonlinear_activation_params=nonlinear_activation_params,
- pad=pad,
- pad_params=pad_params,
- use_causal_conv=use_causal_conv,
- )
- ]
-
- # add final layer
- layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
- if not use_causal_conv:
- layers += [
- getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
- torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias),
- ]
- else:
- layers += [
- CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size,
- bias=bias, pad=pad, pad_params=pad_params),
- ]
- if use_final_nonlinear_activation:
- layers += [torch.nn.Tanh()]
-
- # define the model as a single function
- self.melgan = torch.nn.Sequential(*layers)
-
- # apply weight norm
- if use_weight_norm:
- self.apply_weight_norm()
-
- # reset parameters
- self.reset_parameters()
-
- def forward(self, c):
- """Calculate forward propagation.
-
- Args:
- c (Tensor): Input tensor (B, channels, T).
-
- Returns:
- Tensor: Output tensor (B, 1, T ** prod(upsample_scales)).
-
- """
- return self.melgan(c)
-
- def remove_weight_norm(self):
- """Remove weight normalization module from all of the layers."""
- def _remove_weight_norm(m):
- try:
- logging.debug(f"Weight norm is removed from {m}.")
- torch.nn.utils.remove_weight_norm(m)
- except ValueError: # this module didn't have weight norm
- return
-
- self.apply(_remove_weight_norm)
-
- def apply_weight_norm(self):
- """Apply weight normalization module from all of the layers."""
- def _apply_weight_norm(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
- torch.nn.utils.weight_norm(m)
- logging.debug(f"Weight norm is applied to {m}.")
-
- self.apply(_apply_weight_norm)
-
- def reset_parameters(self):
- """Reset parameters.
-
- This initialization follows official implementation manner.
- https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
-
- """
- def _reset_parameters(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
- m.weight.data.normal_(0.0, 0.02)
- logging.debug(f"Reset parameters in {m}.")
-
- self.apply(_reset_parameters)
-
-
-class MelGANDiscriminator(torch.nn.Module):
- """MelGAN discriminator module."""
-
- def __init__(self,
- in_channels=1,
- out_channels=1,
- kernel_sizes=[5, 3],
- channels=16,
- max_downsample_channels=1024,
- bias=True,
- downsample_scales=[4, 4, 4, 4],
- nonlinear_activation="LeakyReLU",
- nonlinear_activation_params={"negative_slope": 0.2},
- pad="ReflectionPad1d",
- pad_params={},
- ):
-        """Initialize MelGAN discriminator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
- and the first and the second kernel sizes will be used for the last two layers.
- For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15,
- the last two layers' kernel size will be 5 and 3, respectively.
- channels (int): Initial number of channels for conv layer.
- max_downsample_channels (int): Maximum number of channels for downsampling layers.
- bias (bool): Whether to add bias parameter in convolution layers.
- downsample_scales (list): List of downsampling scales.
- nonlinear_activation (str): Activation function module name.
- nonlinear_activation_params (dict): Hyperparameters for activation function.
- pad (str): Padding function module name before dilated convolution layer.
- pad_params (dict): Hyperparameters for padding function.
-
- """
- super(MelGANDiscriminator, self).__init__()
- self.layers = torch.nn.ModuleList()
-
- # check kernel size is valid
- assert len(kernel_sizes) == 2
- assert kernel_sizes[0] % 2 == 1
- assert kernel_sizes[1] % 2 == 1
-
- # add first layer
- self.layers += [
- torch.nn.Sequential(
- getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
- torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias),
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
- )
- ]
-
- # add downsample layers
- in_chs = channels
- for downsample_scale in downsample_scales:
- out_chs = min(in_chs * downsample_scale, max_downsample_channels)
- self.layers += [
- torch.nn.Sequential(
- torch.nn.Conv1d(
- in_chs, out_chs,
- kernel_size=downsample_scale * 10 + 1,
- stride=downsample_scale,
- padding=downsample_scale * 5,
- groups=in_chs // 4,
- bias=bias,
- ),
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
- )
- ]
- in_chs = out_chs
-
- # add final layers
- out_chs = min(in_chs * 2, max_downsample_channels)
- self.layers += [
- torch.nn.Sequential(
- torch.nn.Conv1d(
- in_chs, out_chs, kernel_sizes[0],
- padding=(kernel_sizes[0] - 1) // 2,
- bias=bias,
- ),
- getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
- )
- ]
- self.layers += [
- torch.nn.Conv1d(
- out_chs, out_channels, kernel_sizes[1],
- padding=(kernel_sizes[1] - 1) // 2,
- bias=bias,
- ),
- ]
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input noise signal (B, 1, T).
-
- Returns:
- List: List of output tensors of each layer.
-
- """
- outs = []
- for f in self.layers:
- x = f(x)
- outs += [x]
-
- return outs
-
-
-class MelGANMultiScaleDiscriminator(torch.nn.Module):
- """MelGAN multi-scale discriminator module."""
-
- def __init__(self,
- in_channels=1,
- out_channels=1,
- scales=3,
- downsample_pooling="AvgPool1d",
- # follow the official implementation setting
- downsample_pooling_params={
- "kernel_size": 4,
- "stride": 2,
- "padding": 1,
- "count_include_pad": False,
- },
- kernel_sizes=[5, 3],
- channels=16,
- max_downsample_channels=1024,
- bias=True,
- downsample_scales=[4, 4, 4, 4],
- nonlinear_activation="LeakyReLU",
- nonlinear_activation_params={"negative_slope": 0.2},
- pad="ReflectionPad1d",
- pad_params={},
- use_weight_norm=True,
- ):
-        """Initialize MelGAN multi-scale discriminator module.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- downsample_pooling (str): Pooling module name for downsampling of the inputs.
- downsample_pooling_params (dict): Parameters for the above pooling module.
-            kernel_sizes (list): List of two kernel sizes. Their product will be used for the first conv layer,
- and the first and the second kernel sizes will be used for the last two layers.
- channels (int): Initial number of channels for conv layer.
- max_downsample_channels (int): Maximum number of channels for downsampling layers.
- bias (bool): Whether to add bias parameter in convolution layers.
- downsample_scales (list): List of downsampling scales.
- nonlinear_activation (str): Activation function module name.
- nonlinear_activation_params (dict): Hyperparameters for activation function.
- pad (str): Padding function module name before dilated convolution layer.
- pad_params (dict): Hyperparameters for padding function.
-            use_weight_norm (bool): Whether to use weight norm in all of the conv layers.
-
- """
- super(MelGANMultiScaleDiscriminator, self).__init__()
- self.discriminators = torch.nn.ModuleList()
-
- # add discriminators
- for _ in range(scales):
- self.discriminators += [
- MelGANDiscriminator(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_sizes=kernel_sizes,
- channels=channels,
- max_downsample_channels=max_downsample_channels,
- bias=bias,
- downsample_scales=downsample_scales,
- nonlinear_activation=nonlinear_activation,
- nonlinear_activation_params=nonlinear_activation_params,
- pad=pad,
- pad_params=pad_params,
- )
- ]
- self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params)
-
- # apply weight norm
- if use_weight_norm:
- self.apply_weight_norm()
-
- # reset parameters
- self.reset_parameters()
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input noise signal (B, 1, T).
-
- Returns:
- List: List of list of each discriminator outputs, which consists of each layer output tensors.
-
- """
- outs = []
- for f in self.discriminators:
- outs += [f(x)]
- x = self.pooling(x)
-
- return outs
-
- def remove_weight_norm(self):
- """Remove weight normalization module from all of the layers."""
- def _remove_weight_norm(m):
- try:
- logging.debug(f"Weight norm is removed from {m}.")
- torch.nn.utils.remove_weight_norm(m)
- except ValueError: # this module didn't have weight norm
- return
-
- self.apply(_remove_weight_norm)
-
- def apply_weight_norm(self):
- """Apply weight normalization module from all of the layers."""
- def _apply_weight_norm(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
- torch.nn.utils.weight_norm(m)
- logging.debug(f"Weight norm is applied to {m}.")
-
- self.apply(_apply_weight_norm)
-
- def reset_parameters(self):
- """Reset parameters.
-
- This initialization follows official implementation manner.
- https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
-
- """
- def _reset_parameters(m):
- if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
- m.weight.data.normal_(0.0, 0.02)
- logging.debug(f"Reset parameters in {m}.")
-
- self.apply(_reset_parameters)
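
As the generator docstring states, `MelGANGenerator` turns `T` mel frames into `T * prod(upsample_scales)` waveform samples (8·8·2·2 = 256 with the defaults). A quick shape check under the default settings, assuming the module above and its `parallel_wavegan` layer dependencies are importable:

```python
# Quick shape check for MelGANGenerator with its default settings:
# output length should be T * prod(upsample_scales) = T * 256.
import torch

generator = MelGANGenerator()            # defaults: in_channels=80, upsample_scales=[8, 8, 2, 2]
mel = torch.randn(1, 80, 32)             # (B, mel_channels, T)
with torch.no_grad():
    wav = generator(mel)
print(wav.shape)                         # torch.Size([1, 1, 8192]) -> 32 frames * 256 samples
```
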
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/samplers/grouped_batch_sampler.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/samplers/grouped_batch_sampler.py
deleted file mode 100644
index 5b247730aacd04dd0c752664acde3257c4eddd71..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/samplers/grouped_batch_sampler.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import numpy as np
-from torch.utils.data.sampler import BatchSampler, Sampler
-
-
-class GroupedBatchSampler(BatchSampler):
- """
- Wraps another sampler to yield a mini-batch of indices.
- It enforces that the batch only contain elements from the same group.
-    It also tries to provide mini-batches which follow an ordering that is
- as close as possible to the ordering from the original sampler.
- """
-
- def __init__(self, sampler, group_ids, batch_size):
- """
- Args:
- sampler (Sampler): Base sampler.
- group_ids (list[int]): If the sampler produces indices in range [0, N),
- `group_ids` must be a list of `N` ints which contains the group id of each sample.
- The group ids must be a set of integers in the range [0, num_groups).
- batch_size (int): Size of mini-batch.
- """
- if not isinstance(sampler, Sampler):
- raise ValueError(
- "sampler should be an instance of "
- "torch.utils.data.Sampler, but got sampler={}".format(sampler)
- )
- self.sampler = sampler
- self.group_ids = np.asarray(group_ids)
- assert self.group_ids.ndim == 1
- self.batch_size = batch_size
- groups = np.unique(self.group_ids).tolist()
-
- # buffer the indices of each group until batch size is reached
- self.buffer_per_group = {k: [] for k in groups}
-
- def __iter__(self):
- for idx in self.sampler:
- group_id = self.group_ids[idx]
- group_buffer = self.buffer_per_group[group_id]
- group_buffer.append(idx)
- if len(group_buffer) == self.batch_size:
- yield group_buffer[:] # yield a copy of the list
- del group_buffer[:]
-
- def __len__(self):
- raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
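
`GroupedBatchSampler` only emits a batch once a group's buffer fills, so every yielded batch is homogeneous in its group id (detectron2 uses this for aspect-ratio grouping). A small sketch with a sequential base sampler and two groups; the group ids are made up.

```python
# Small sketch of GroupedBatchSampler: batches only mix indices that share a group id.
# The group ids below are made up (think "wide" vs. "tall" images in aspect-ratio grouping).
from torch.utils.data.sampler import SequentialSampler

group_ids = [0, 1, 0, 0, 1, 1, 0, 1]          # one id per dataset element
base_sampler = SequentialSampler(range(len(group_ids)))
batch_sampler = GroupedBatchSampler(base_sampler, group_ids, batch_size=2)

for batch in batch_sampler:
    print(batch, [group_ids[i] for i in batch])   # every batch is single-group
```
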
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_uniform.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_uniform.py
deleted file mode 100644
index 0d72cc30c9342b36efd6a7e80e55bf088b5c797c..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_uniform.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import random
-import torch
-
-from .densepose_base import DensePoseBaseSampler
-
-
-class DensePoseUniformSampler(DensePoseBaseSampler):
- """
- Samples DensePose data from DensePose predictions.
- Samples for each class are drawn uniformly over all pixels estimated
- to belong to that class.
- """
-
- def __init__(self, count_per_class: int = 8):
- """
- Constructor
-
- Args:
- count_per_class (int): the sampler produces at most `count_per_class`
- samples for each category
- """
- super().__init__(count_per_class)
-
- def _produce_index_sample(self, values: torch.Tensor, count: int):
- """
- Produce a uniform sample of indices to select data
-
- Args:
- values (torch.Tensor): an array of size [n, k] that contains
- estimated values (U, V, confidences);
- n: number of channels (U, V, confidences)
- k: number of points labeled with part_id
- count (int): number of samples to produce, should be positive and <= k
-
- Return:
- list(int): indices of values (along axis 1) selected as a sample
- """
- k = values.shape[1]
- return random.sample(range(k), count)
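
The uniform sampler simply draws `count` column indices without replacement and uses them to pick points from the `[n, k]` value tensor handed in by the base class. A minimal sketch of that selection with a made-up tensor:

```python
# Minimal sketch of the uniform index sampling: pick `count` of the k labeled
# points without replacement and keep their (U, V, confidence) columns.
import random
import torch

values = torch.rand(3, 20)                 # [n, k]: U, V, confidence for 20 points
count = 8
index_sample = random.sample(range(values.shape[1]), count)
sampled_values = values[:, index_sample]   # [n, count]
print(sampled_values.shape)                # torch.Size([3, 8])
```
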
diff --git a/spaces/chaninder/ds3-ml-model/README.md b/spaces/chaninder/ds3-ml-model/README.md
deleted file mode 100644
index c7ef59b6e3db5ce3c5f4ca21bdef65e1134106c4..0000000000000000000000000000000000000000
--- a/spaces/chaninder/ds3-ml-model/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Ds3 Ml Model
-emoji: 🏢
-colorFrom: pink
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.9.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/chansung/co-write-with-llama2/README.md b/spaces/chansung/co-write-with-llama2/README.md
deleted file mode 100644
index 3c29d10dd32587c2103a232c703abd25df239cbc..0000000000000000000000000000000000000000
--- a/spaces/chansung/co-write-with-llama2/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Co Write With Llama2
-emoji: ✍🏼
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: true
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/chansung/llama2-with-gradio-chat/llama2.py b/spaces/chansung/llama2-with-gradio-chat/llama2.py
deleted file mode 100644
index 73f085177389f3e6724d076eb3da2ca3db05178f..0000000000000000000000000000000000000000
--- a/spaces/chansung/llama2-with-gradio-chat/llama2.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os
-import json
-import requests
-import sseclient
-
-from pingpong import PingPong
-from pingpong.pingpong import PPManager
-from pingpong.pingpong import PromptFmt
-from pingpong.pingpong import UIFmt
-from pingpong.gradio import GradioChatUIFmt
-
-class LLaMA2ChatPromptFmt(PromptFmt):
- @classmethod
- def ctx(cls, context):
- if context is None or context == "":
- return ""
- else:
-            return f"""<<SYS>>
-{context}
-<</SYS>>
-"""
-
- @classmethod
- def prompt(cls, pingpong, truncate_size):
- ping = pingpong.ping[:truncate_size]
- pong = "" if pingpong.pong is None else pingpong.pong[:truncate_size]
- return f"""[INST] {ping} [/INST] {pong}"""
-
-class LLaMA2ChatPPManager(PPManager):
- def build_prompts(self, from_idx: int=0, to_idx: int=-1, fmt: PromptFmt=LLaMA2ChatPromptFmt, truncate_size: int=None):
- if to_idx == -1 or to_idx >= len(self.pingpongs):
- to_idx = len(self.pingpongs)
-
- results = fmt.ctx(self.ctx)
-
- for idx, pingpong in enumerate(self.pingpongs[from_idx:to_idx]):
- results += fmt.prompt(pingpong, truncate_size=truncate_size)
-
- return results
-
-class GradioLLaMA2ChatPPManager(LLaMA2ChatPPManager):
- def build_uis(self, from_idx: int=0, to_idx: int=-1, fmt: UIFmt=GradioChatUIFmt):
- if to_idx == -1 or to_idx >= len(self.pingpongs):
- to_idx = len(self.pingpongs)
-
- results = []
-
- for pingpong in self.pingpongs[from_idx:to_idx]:
- results.append(fmt.ui(pingpong))
-
- return results
-
-async def gen_text(
- prompt,
- hf_model='meta-llama/Llama-2-70b-chat-hf',
- hf_token=None,
- parameters=None
-):
- if hf_token is None:
- raise ValueError("Hugging Face Token is not set")
-
- if parameters is None:
- parameters = {
- 'max_new_tokens': 512,
- 'do_sample': True,
- 'return_full_text': False,
- 'temperature': 1.0,
- 'top_k': 50,
- # 'top_p': 1.0,
- 'repetition_penalty': 1.2
- }
-
- url = f'https://api-inference.huggingface.co/models/{hf_model}'
- headers={
- 'Authorization': f'Bearer {hf_token}',
- 'Content-type': 'application/json'
- }
- data = {
- 'inputs': prompt,
- 'stream': True,
- 'options': {
- 'use_cache': False,
- },
- 'parameters': parameters
- }
-
- r = requests.post(
- url,
- headers=headers,
- data=json.dumps(data),
- stream=True
- )
-
- client = sseclient.SSEClient(r)
- for event in client.events():
- yield json.loads(event.data)['token']['text']
-
-def gen_text_none_stream(
- prompt,
- hf_model='meta-llama/Llama-2-70b-chat-hf',
- hf_token=None,
-):
- parameters = {
- 'max_new_tokens': 64,
- 'do_sample': True,
- 'return_full_text': False,
- 'temperature': 0.7,
- 'top_k': 10,
- # 'top_p': 1.0,
- 'repetition_penalty': 1.2
- }
-
- url = f'https://api-inference.huggingface.co/models/{hf_model}'
- headers={
- 'Authorization': f'Bearer {hf_token}',
- 'Content-type': 'application/json'
- }
- data = {
- 'inputs': prompt,
- 'stream': False,
- 'options': {
- 'use_cache': False,
- },
- 'parameters': parameters
- }
-
- r = requests.post(
- url,
- headers=headers,
- data=json.dumps(data),
- )
-
- return json.loads(r.text)[0]["generated_text"]
\ No newline at end of file
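
`gen_text` above is an async generator that streams tokens from the Hugging Face Inference API over SSE. A rough consumption sketch follows; it assumes a valid token in the hypothetical `HF_TOKEN` environment variable, and the prompt string simply mirrors the `<<SYS>> ... [INST] ... [/INST]` shape that `LLaMA2ChatPPManager.build_prompts` produces.

```python
# Rough sketch of consuming the streaming generator above. Assumes a valid
# Hugging Face token in the HF_TOKEN environment variable (an assumption of
# this sketch); the prompt mirrors the format produced by build_prompts().
import asyncio
import os


async def stream_demo():
    prompt = "<<SYS>>\nYou are a helpful writing assistant.\n<</SYS>>\n[INST] Draft an opening line. [/INST] "
    async for token in gen_text(prompt, hf_token=os.environ["HF_TOKEN"]):
        print(token, end="", flush=True)
    print()


if __name__ == "__main__":
    asyncio.run(stream_demo())
```
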
diff --git a/spaces/charles0519/ChuanhuChatGPT/Dockerfile b/spaces/charles0519/ChuanhuChatGPT/Dockerfile
deleted file mode 100644
index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000
--- a/spaces/charles0519/ChuanhuChatGPT/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-FROM python:3.9
-MAINTAINER iskoldt
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV my_api_key empty
-ENV dockerrun yes
-CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
diff --git a/spaces/charlesnchr/VSR-SIM/archs/swinir_rcab.py b/spaces/charlesnchr/VSR-SIM/archs/swinir_rcab.py
deleted file mode 100644
index fbf009ad3b3fd116793fe8c3a60b3db870e12327..0000000000000000000000000000000000000000
--- a/spaces/charlesnchr/VSR-SIM/archs/swinir_rcab.py
+++ /dev/null
@@ -1,1296 +0,0 @@
-# Modified from https://github.com/JingyunLiang/SwinIR
-# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
-# Originally Written by Ze Liu, Modified by Jingyun Liang.
-
-import collections.abc
-import math
-import warnings
-import torch
-import torch.nn as nn
-import torch.utils.checkpoint as checkpoint
-from itertools import repeat
-
-# from self_attention_cv import AxialAttentionBlock
-
-from functools import reduce, lru_cache
-from operator import mul
-from einops import rearrange
-import sys
-
-
-def make_layer(basic_block, num_basic_block, **kwarg):
- """Make layers by stacking the same blocks.
-
- Args:
- basic_block (nn.module): nn.module class for basic block.
- num_basic_block (int): number of blocks.
-
- Returns:
- nn.Sequential: Stacked blocks in nn.Sequential.
- """
- layers = []
- for _ in range(num_basic_block):
- layers.append(basic_block(**kwarg))
- return nn.Sequential(*layers)
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
-
-# From PyTorch
-def _ntuple(n):
-
- def parse(x):
- if isinstance(x, collections.abc.Iterable):
- return x
- return tuple(repeat(x, n))
-
- return parse
-to_2tuple = _ntuple(2)
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
- # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
- # Cut & paste from PyTorch official master until it's in a few official releases - RW
- # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn(
- 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
- 'The distribution of values may be incorrect.',
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- low = norm_cdf((a - mean) / std)
- up = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [low, up], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * low - 1, 2 * up - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution.
-
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
-
- The values are effectively drawn from the
- normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
- with values outside :math:`[a, b]` redrawn until they are within
- the bounds. The method used for generating the random values works
- best when :math:`a \leq \text{mean} \leq b`.
-
- Args:
- tensor: an n-dimensional `torch.Tensor`
- mean: the mean of the normal distribution
- std: the standard deviation of the normal distribution
- a: the minimum cutoff value
- b: the maximum cutoff value
-
- Examples:
- >>> w = torch.empty(3, 5)
- >>> nn.init.trunc_normal_(w)
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-class ChannelAttention(nn.Module):
- """Channel attention used in RCAN.
-
- Args:
- num_feat (int): Channel number of intermediate features.
- squeeze_factor (int): Channel squeeze factor. Default: 16.
- """
-
- def __init__(self, num_feat, squeeze_factor=16):
- super(ChannelAttention, self).__init__()
- self.attention = nn.Sequential(
- nn.AdaptiveAvgPool2d(1), nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0),
- nn.ReLU(inplace=True), nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), nn.Sigmoid())
-
- def forward(self, x):
- y = self.attention(x)
- return x * y
-
-
-class RCAB(nn.Module):
- """Residual Channel Attention Block (RCAB) used in RCAN.
-
- Args:
- num_feat (int): Channel number of intermediate features.
- squeeze_factor (int): Channel squeeze factor. Default: 16.
- res_scale (float): Scale the residual. Default: 1.
- """
-
- def __init__(self, num_feat, squeeze_factor=16, res_scale=1):
- super(RCAB, self).__init__()
- self.res_scale = res_scale
-
- self.rcab = nn.Sequential(
- nn.Conv2d(num_feat, num_feat, 3, 1, 1), nn.ReLU(True), nn.Conv2d(num_feat, num_feat, 3, 1, 1),
- ChannelAttention(num_feat, squeeze_factor))
-
- def forward(self, x):
- res = self.rcab(x) * self.res_scale
- return res + x
-
-
-class ResidualGroup(nn.Module):
- """Residual Group of RCAB.
-
- Args:
- num_feat (int): Channel number of intermediate features.
- num_block (int): Block number in the body network.
- squeeze_factor (int): Channel squeeze factor. Default: 16.
- res_scale (float): Scale the residual. Default: 1.
- """
-
- def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1):
- super(ResidualGroup, self).__init__()
-
- self.residual_group = make_layer(
- RCAB, num_block, num_feat=num_feat, squeeze_factor=squeeze_factor, res_scale=res_scale)
- self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
-
- def forward(self, x):
- res = self.conv(self.residual_group(x))
- return res + x
-
-
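-def _demo_rcab_shapes():
-    """Illustrative sketch (not part of the original file): ChannelAttention, RCAB
-    and ResidualGroup are shape-preserving on (b, c, h, w) feature maps; they only
-    re-weight channels and add residuals."""
-    feat = torch.randn(1, 64, 48, 48)
-    assert ChannelAttention(64)(feat).shape == feat.shape
-    assert RCAB(64)(feat).shape == feat.shape
-    assert ResidualGroup(num_feat=64, num_block=2)(feat).shape == feat.shape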
-
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
- """
- if drop_prob == 0. or not training:
- return x
- keep_prob = 1 - drop_prob
- shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
- random_tensor.floor_() # binarize
- output = x.div(keep_prob) * random_tensor
- return output
-
-
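-def _demo_drop_path():
-    """Illustrative sketch (not part of the original file): drop_path is the identity
-    when drop_prob is 0 or training is False; in training mode it zeroes entire
-    samples and rescales the survivors by 1 / keep_prob so the expected value of
-    the output matches the input."""
-    x = torch.ones(4, 8, 16)
-    assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
-    out = drop_path(x, drop_prob=0.5, training=True)
-    # every element of a kept sample is 1 / keep_prob = 2.0, dropped samples are 0.0
-    assert set(out.unique().tolist()) <= {0.0, 2.0}
-
-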
-class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
- """
-
- def __init__(self, drop_prob=None):
- super(DropPath, self).__init__()
- self.drop_prob = drop_prob
-
- def forward(self, x):
- return drop_path(x, self.drop_prob, self.training)
-
-
-class Mlp(nn.Module):
-
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (b, h, w, c)
- window_size (int): window size
-
- Returns:
- windows: (num_windows*b, window_size, window_size, c)
- """
- b, h, w, c = x.shape
- x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)
- return windows
-
-
-def window_reverse(windows, window_size, h, w):
- """
- Args:
- windows: (num_windows*b, window_size, window_size, c)
- window_size (int): Window size
- h (int): Height of image
- w (int): Width of image
-
- Returns:
- x: (b, h, w, c)
- """
- b = int(windows.shape[0] / (h * w / window_size / window_size))
- x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
- return x
-
-
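-def _demo_window_partition_roundtrip():
-    """Illustrative sketch (not part of the original file): window_partition and
-    window_reverse are exact inverses whenever h and w are multiples of the
-    window size."""
-    x = torch.randn(2, 16, 16, 32)                # (b, h, w, c)
-    windows = window_partition(x, 8)              # (b * num_windows, 8, 8, 32)
-    assert windows.shape == (8, 8, 8, 32)
-    assert torch.equal(window_reverse(windows, 8, 16, 16), x)
-
-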
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
-
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim**-0.5
-
- # define a parameter table of relative position bias
- self.relative_position_bias_table = nn.Parameter(
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer('relative_position_index', relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
-
- self.proj_drop = nn.Dropout(proj_drop)
-
- trunc_normal_(self.relative_position_bias_table, std=.02)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*b, n, c)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- b_, n, c = x.shape
- qkv = self.qkv(x).reshape(b_, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = (q @ k.transpose(-2, -1))
-
- relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nw = mask.shape[0]
- attn = attn.view(b_ // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, n, n)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(b_, n, c)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
-
- def flops(self, n):
- # calculate flops for 1 window with token length of n
- flops = 0
- # qkv = self.qkv(x)
- flops += n * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * n * (self.dim // self.num_heads) * n
- # x = (attn @ v)
- flops += self.num_heads * n * n * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += n * self.dim * self.dim
- return flops
-
-
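-def _demo_window_attention():
-    """Illustrative sketch (not part of the original file): WindowAttention consumes
-    flattened windows of shape (num_windows * b, window_size**2, dim) and returns
-    the same shape; the learned relative position bias is added to the attention
-    logits before the softmax."""
-    attn = WindowAttention(dim=32, window_size=(8, 8), num_heads=4)
-    tokens = torch.randn(6, 8 * 8, 32)            # 6 windows of 64 tokens each
-    assert attn(tokens).shape == (6, 64, 32)
-
-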
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self,
- dim,
- input_resolution,
- num_heads,
- window_size=7,
- shift_size=0,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
-        assert 0 <= self.shift_size < self.window_size, 'shift_size must be in [0, window_size)'
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim,
- window_size=to_2tuple(self.window_size),
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop)
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer('attn_mask', attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- h, w = x_size
- img_mask = torch.zeros((1, h, w, 1)) # 1 h w 1
-        h_slices = (slice(0, -self.window_size),
-                    slice(-self.window_size, -self.shift_size),
-                    slice(-self.shift_size, None))
-        w_slices = (slice(0, -self.window_size),
-                    slice(-self.window_size, -self.shift_size),
-                    slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nw, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x, x_size):
- h, w = x_size
- b, _, c = x.shape
- # assert seq_len == h * w, "input feature has wrong size"
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(b, h, w, c)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nw*b, window_size, window_size, c
- x_windows = x_windows.view(-1, self.window_size * self.window_size, c) # nw*b, window_size*window_size, c
-
- # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
- if self.input_resolution == x_size:
- attn_windows = self.attn(x_windows, mask=self.attn_mask) # nw*b, window_size*window_size, c
- else:
- attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c)
- shifted_x = window_reverse(attn_windows, self.window_size, h, w) # b h' w' c
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(b, h * w, c)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
-
- return x
-
- def extra_repr(self) -> str:
- return (f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, '
- f'window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}')
-
- def flops(self):
- flops = 0
- h, w = self.input_resolution
- # norm1
- flops += self.dim * h * w
- # W-MSA/SW-MSA
- nw = h * w / self.window_size / self.window_size
- flops += nw * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * h * w * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * h * w
- return flops
-
-
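-def _demo_swin_block():
-    """Illustrative sketch (not part of the original file): a SwinTransformerBlock
-    keeps the (b, h*w, c) token layout; blocks with shift_size > 0 apply the cyclic
-    shift (SW-MSA) using the attention mask computed in calculate_mask."""
-    blk = SwinTransformerBlock(dim=32, input_resolution=(16, 16), num_heads=4,
-                               window_size=8, shift_size=4)
-    tokens = torch.randn(2, 16 * 16, 32)
-    assert blk(tokens, (16, 16)).shape == (2, 256, 32)
-
-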
-class PatchMerging(nn.Module):
- r""" Patch Merging Layer.
-
- Args:
- input_resolution (tuple[int]): Resolution of input feature.
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.input_resolution = input_resolution
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(4 * dim)
-
- def forward(self, x):
- """
- x: b, h*w, c
- """
- h, w = self.input_resolution
- b, seq_len, c = x.shape
- assert seq_len == h * w, 'input feature has wrong size'
- assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) are not even.'
-
- x = x.view(b, h, w, c)
-
- x0 = x[:, 0::2, 0::2, :] # b h/2 w/2 c
- x1 = x[:, 1::2, 0::2, :] # b h/2 w/2 c
- x2 = x[:, 0::2, 1::2, :] # b h/2 w/2 c
- x3 = x[:, 1::2, 1::2, :] # b h/2 w/2 c
- x = torch.cat([x0, x1, x2, x3], -1) # b h/2 w/2 4*c
- x = x.view(b, -1, 4 * c) # b h/2*w/2 4*c
-
- x = self.norm(x)
- x = self.reduction(x)
-
- return x
-
- def extra_repr(self) -> str:
- return f'input_resolution={self.input_resolution}, dim={self.dim}'
-
- def flops(self):
- h, w = self.input_resolution
- flops = h * w * self.dim
- flops += (h // 2) * (w // 2) * 4 * self.dim * 2 * self.dim
- return flops
-
-
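-def _demo_patch_merging():
-    """Illustrative sketch (not part of the original file): PatchMerging halves each
-    spatial dimension and doubles the channel count, turning (b, h*w, c) tokens into
-    (b, h*w/4, 2c). It is unused in this SR model (downsample=None) but kept for
-    completeness."""
-    merge = PatchMerging(input_resolution=(16, 16), dim=32)
-    assert merge(torch.randn(2, 16 * 16, 32)).shape == (2, 64, 64)
-
-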
-class BasicLayer(nn.Module):
- """ A basic Swin Transformer layer for one stage.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False):
-
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList([
- SwinTransformerBlock(
- dim=dim,
- input_resolution=input_resolution,
- num_heads=num_heads,
- window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer) for i in range(depth)
- ])
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
- else:
- self.downsample = None
-
- def forward(self, x, x_size):
- for blk in self.blocks:
- if self.use_checkpoint:
- x = checkpoint.checkpoint(blk, x)
- else:
- x = blk(x, x_size)
- if self.downsample is not None:
- x = self.downsample(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
-
- def flops(self):
- flops = 0
- for blk in self.blocks:
- flops += blk.flops()
- if self.downsample is not None:
- flops += self.downsample.flops()
- return flops
-
-
-class RSTB(nn.Module):
- """Residual Swin Transformer Block (RSTB).
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- img_size: Input image size.
- patch_size: Patch size.
- resi_connection: The convolutional block before residual connection.
- """
-
- def __init__(self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False,
- img_size=224,
- patch_size=4,
- use_rcab=True,
- resi_connection='1conv'):
- super(RSTB, self).__init__()
-
- self.dim = dim
- self.input_resolution = input_resolution
-
- self.residual_group = BasicLayer(
- dim=dim,
- input_resolution=input_resolution,
- depth=depth,
- num_heads=num_heads,
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path,
- norm_layer=norm_layer,
- downsample=downsample,
- use_checkpoint=use_checkpoint)
-
- # if resi_connection == '1conv':
- # # ML-SIM v1 v2 v3 v4 v6 v7 v8
- # self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
-
- # # ML-SIM v5
- # # self.conv = nn.Sequential(
- # # nn.PixelUnshuffle(2),
- # # nn.Conv2d(4*dim, 4*dim, 3, 1, 1),
- # # nn.PixelShuffle(2))
-
- # elif resi_connection == '3conv':
- # # to save parameters and memory
- # self.conv = nn.Sequential(
- # nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- # nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- # nn.Conv2d(dim // 4, dim, 3, 1, 1))
-
- self.use_rcab = use_rcab
-
- self.resi_connection1 = nn.Conv2d(dim, dim, 3, 1, 1)
- if self.use_rcab:
- self.resi_connection2 = ResidualGroup(num_feat=dim,squeeze_factor=16,num_block=12)
- self.resi_connection3 = nn.Conv2d(dim, dim, 3, 1, 1)
-
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
-
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
-
- def forward(self, x, x_size):
- # return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
- shortcut = x
- x = self.patch_unembed(self.residual_group(x, x_size), x_size)
- x = self.resi_connection1(x)
- if self.use_rcab:
- x = self.resi_connection2(x)
- x = self.resi_connection3(x)
- x = self.patch_embed(x) + shortcut
- return x
-
- def flops(self):
- flops = 0
- flops += self.residual_group.flops()
- h, w = self.input_resolution
- flops += h * w * self.dim * self.dim * 9
- flops += self.patch_embed.flops()
- flops += self.patch_unembed.flops()
-
- return flops
-
-
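-def _demo_rstb():
-    """Illustrative sketch (not part of the original file): an RSTB maps (b, h*w, c)
-    tokens through a stack of Swin blocks, then applies the convolution (and, when
-    use_rcab=True, a ResidualGroup of RCABs) in image space before adding the
-    residual, returning tokens of the same shape."""
-    rstb = RSTB(dim=32, input_resolution=(16, 16), depth=2, num_heads=4,
-                window_size=8, img_size=16, patch_size=1, use_rcab=True)
-    tokens = torch.randn(1, 16 * 16, 32)
-    assert rstb(tokens, (16, 16)).shape == (1, 256, 32)
-
-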
-class PatchEmbed(nn.Module):
- r""" Image to Patch Embedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- x = x.flatten(2).transpose(1, 2) # b Ph*Pw c
- if self.norm is not None:
- x = self.norm(x)
- return x
-
- def flops(self):
- flops = 0
- h, w = self.img_size
- if self.norm is not None:
- flops += h * w * self.embed_dim
- return flops
-
-
-class PatchUnEmbed(nn.Module):
- r""" Image to Patch Unembedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- def forward(self, x, x_size):
- x = x.transpose(1, 2).view(x.shape[0], self.embed_dim, x_size[0], x_size[1]) # b Ph*Pw c
- return x
-
- def flops(self):
- flops = 0
- return flops
-
-
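-def _demo_patch_embed_roundtrip():
-    """Illustrative sketch (not part of the original file): with patch_size=1 these
-    modules simply flatten (b, c, h, w) feature maps into (b, h*w, c) token
-    sequences and back; no convolutional projection is involved."""
-    embed = PatchEmbed(img_size=64, patch_size=1, embed_dim=32)
-    unembed = PatchUnEmbed(img_size=64, patch_size=1, embed_dim=32)
-    feat = torch.randn(2, 32, 64, 64)
-    assert torch.equal(unembed(embed(feat), (64, 64)), feat)
-
-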
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
-
-class UpsampleOneStep(nn.Sequential):
- """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
- Used in lightweight SR to save parameters.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
-
- """
-
- def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
- self.num_feat = num_feat
- self.input_resolution = input_resolution
- m = []
- m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
- m.append(nn.PixelShuffle(scale))
- super(UpsampleOneStep, self).__init__(*m)
-
- def flops(self):
- h, w = self.input_resolution
- flops = h * w * self.num_feat * 3 * 9
- return flops
-
-
-class SwinIR_RCAB(nn.Module):
- r""" SwinIR
- A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
-
- Args:
- img_size (int | tuple(int)): Input image size. Default 64
- patch_size (int | tuple(int)): Patch size. Default: 1
- in_chans (int): Number of input image channels. Default: 3
- embed_dim (int): Patch embedding dimension. Default: 96
- depths (tuple(int)): Depth of each Swin Transformer layer.
- num_heads (tuple(int)): Number of attention heads in different layers.
- window_size (int): Window size. Default: 7
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
- drop_rate (float): Dropout rate. Default: 0
- attn_drop_rate (float): Attention dropout rate. Default: 0
- drop_path_rate (float): Stochastic depth rate. Default: 0.1
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
- patch_norm (bool): If True, add normalization after patch embedding. Default: True
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
- upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
- img_range: Image range. 1. or 255.
-        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
- resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
- """
-
- def __init__(self,
- opt,
- img_size=256,
- patch_size=1,
- in_chans=3,
- embed_dim=64,
- depths=(6, 6),
- num_heads=(8,8),
- window_size=4,
- mlp_ratio=2.,
- qkv_bias=True,
- qk_scale=None,
- drop_rate=0.,
- attn_drop_rate=0.,
- drop_path_rate=0.1,
- norm_layer=nn.LayerNorm,
- ape=False,
- patch_norm=True,
- use_checkpoint=False,
- upscale=2,
- img_range=1.,
- upsampler='',
- resi_connection='1conv',
- pixelshuffleFactor=1,
- use_rcab=True,
- out_chans=1,
- vis=False,
- **kwargs):
- super().__init__()
- num_in_ch = in_chans
- num_out_ch = out_chans#in_chans
- num_feat = 64
- self.img_range = img_range
- if in_chans == 3:
- rgb_mean = (0.4488, 0.4371, 0.4040)
- self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
- else:
- self.mean = torch.zeros(1, 1, 1, 1)
- self.upscale = upscale
- self.upsampler = upsampler
- print('received ',depths,use_rcab)
-
- # ------------------------- 1, shallow feature extraction ------------------------- #
- # ML-SIM v1 v2 v3 v6
- # self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
-
-
- # ML-SIM v4 v5 v8
- # print('received pixelshufflefactor',pixelshuffleFactor)
- self.conv_first = nn.Conv2d(round(pixelshuffleFactor**2*num_in_ch), embed_dim, 3, 1, 1)
- if pixelshuffleFactor >= 1:
- self.pixelshuffle_encode = nn.PixelUnshuffle(pixelshuffleFactor)
- self.pixelshuffle_decode = nn.PixelShuffle(pixelshuffleFactor)
- else: # e.g. 1/3
- self.pixelshuffle_encode = nn.PixelShuffle(round(1/pixelshuffleFactor))
- self.pixelshuffle_decode = nn.PixelUnshuffle(round(1/pixelshuffleFactor))
-
- # ML-SIM v7
- # pixelshuffleFactor = kwargs['pixelshuffleFactor']
- # self.conv_first = nn.Conv2d(round(3*pixelshuffleFactor**2*num_in_ch), embed_dim, 3, 1, 1)
- # if pixelshuffleFactor > 1:
- # self.pixelshuffle_encode = nn.PixelUnshuffle(pixelshuffleFactor)
- # self.pixelshuffle_decode = nn.PixelShuffle(pixelshuffleFactor)
- # else: # e.g. 1/3
- # self.pixelshuffle_encode = nn.PixelShuffle(round(1/pixelshuffleFactor))
- # self.pixelshuffle_decode = nn.PixelUnshuffle(round(1/pixelshuffleFactor))
-
-
-
-
- # ------------------------- 2, deep feature extraction ------------------------- #
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.num_features = embed_dim
- self.mlp_ratio = mlp_ratio
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
- num_patches = self.patch_embed.num_patches
- patches_resolution = self.patch_embed.patches_resolution
- self.patches_resolution = patches_resolution
-
- # merge non-overlapping patches into image
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
-
- # absolute position embedding
- if self.ape:
- self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
- trunc_normal_(self.absolute_pos_embed, std=.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
-
- # build Residual Swin Transformer blocks (RSTB)
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(
- dim=embed_dim,
- input_resolution=(patches_resolution[0], patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- use_rcab=use_rcab,
- resi_connection=resi_connection)
- self.layers.append(layer)
- self.norm = norm_layer(self.num_features)
-
- # build the last conv layer in deep feature extraction
- if resi_connection == '1conv':
- self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv_after_body = nn.Sequential(
- nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
-
- # ------------------------- 3, high quality image reconstruction ------------------------- #
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR (to save parameters)
- self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
- (patches_resolution[0], patches_resolution[1]))
- elif self.upsampler == 'nearest+conv':
- # for real-world SR (less artifacts)
- assert self.upscale == 4, 'only support x4 now.'
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
- else:
- # for image denoising and JPEG compression artifact reduction
- # self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) # original code
-
- # ML-SIM v1 v6
- # self.conv_last = nn.Conv2d(embed_dim, num_in_ch, 3, 1, 1)
- # self.conv_combine = nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1)
-
- # ML-SIM v2,v3
- # self.conv_last = nn.Conv2d(embed_dim, num_in_ch, 3, 1, 1)
- # self.conv_combine = nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1)
- # self.axial_att_block = AxialAttentionBlock(in_channels=9, dim=256, heads=8)
-
- # ML-SIM v4 v5
- # self.conv_last = nn.Conv2d(embed_dim, round(pixelshuffleFactor**2*num_in_ch), 3, 1, 1)
- # self.conv_combine = nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1)
-
- # ML-SIM v7
- # self.conv_last = nn.Conv2d(embed_dim, round(3*pixelshuffleFactor**2*num_in_ch), 3, 1, 1)
- # self.conv_combine = nn.Conv2d(3*num_in_ch, num_out_ch, 3, 1, 1)
-
- # ML-SIM v8
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, round(pixelshuffleFactor**2*num_in_ch), 3, 1, 1)
- self.conv_combine = nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1)
-
- self.task = opt.task
- if self.task == 'segment':
- self.segmentation_decode = nn.Conv2d(num_in_ch, 4, 1)
- self.vis = vis
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {'absolute_pos_embed'}
-
- @torch.jit.ignore
- def no_weight_decay_keywords(self):
- return {'relative_position_bias_table'}
-
- def forward_features(self, x):
- x_size = (x.shape[2], x.shape[3])
- # print('before patch embed',x.shape)
- x = self.patch_embed(x)
- # print('after patch embed',x.shape)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for idx,layer in enumerate(self.layers):
- x = layer(x, x_size)
- if self.vis:
- x_unembed = self.patch_unembed(x, x_size)
- torch.save(x_unembed.detach().cpu(),'x_layer_%d.pth' % idx)
-
-
- x = self.norm(x) # b seq_len c
-        # print('before patch unembed', x.shape)
-        x = self.patch_unembed(x, x_size)
-        # print('after patch unembed', x.shape)
-
- return x
-
- def forward(self, x):
- # print('starting forward',x.shape)
- self.mean = self.mean.type_as(x)
- x = (x - self.mean) * self.img_range
-
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.conv_last(self.upsample(x))
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.upsample(x)
- elif self.upsampler == 'nearest+conv':
- # for real-world SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.conv_last(self.lrelu(self.conv_hr(x)))
- else:
- # for image denoising and JPEG compression artifact reduction
-
- # ML-SIM v1 v2 v3
- # x_first = self.conv_first(x)
- # res = self.conv_after_body(self.forward_features(x_first)) + x_first
- # res = self.conv_last(res)
-
- # ML-SIM v1
- # x = self.conv_combine(x + res)
-
- # ML-SIM v2
- # x = self.axial_att_block(x)
- # x = self.conv_combine(x + res)
-
- # ML-SIM v3
- # res = self.axial_att_block(res)
- # x = self.conv_combine(x + res)
-
- # ML-SIM v4 v5
- # x_encoded = self.pixelshuffle_encode(x)
- # x_first = self.conv_first(x_encoded)
- # res = self.conv_after_body(self.forward_features(x_first)) + x_first
- # res = self.conv_last(res)
- # res_decoded = self.pixelshuffle_decode(res)
- # x = self.conv_combine(x + res_decoded)
-
-
- # ML-SIM v6
- # x_encoded = torch.fft.fft2(x,dim=(-1,-2)).real
- # x_first = self.conv_first(x_encoded + x)
- # res = self.conv_after_body(self.forward_features(x_first)) + x_first
- # res = self.conv_last(res)
- # x = self.conv_combine(x + res)
-
- # ML-SIM v7
- # x_cos = torch.cos(x)
- # x_sin = torch.sin(x)
- # x = torch.cat((x,x_cos,x_sin),dim=1)
- # x_encoded = self.pixelshuffle_encode(x)
- # x_first = self.conv_first(x_encoded)
- # res = self.conv_after_body(self.forward_features(x_first)) + x_first
- # res = self.conv_last(res)
- # res_decoded = self.pixelshuffle_decode(res)
- # x = self.conv_combine(x + res_decoded)
-
- # ML-SIM v8
- x_encoded = self.pixelshuffle_encode(x)
-
- # print('after pixelshuffle',x_encoded.shape)
- x_first = self.conv_first(x_encoded)
- # print('after conv first',x_first.shape)
- x_forwardfeat = self.forward_features(x_first)
- # print('after forward feat',x_forwardfeat.shape)
- res = self.conv_after_body(x_forwardfeat) + x_first
- # print('after conv after body',res.shape)
- x = self.conv_before_upsample(res)
- # print('after conv before upsample',x.shape)
- x = self.conv_last(self.upsample(x))
- # print('after conv last',x.shape)
-
-
- if self.task == 'segment':
- x = self.segmentation_decode(x) # assumes pixelshuffle = 1
- else:
- res_decoded = self.pixelshuffle_decode(x)
- # print('after pixel shuffle',res_decoded.shape)
- x = self.conv_combine(res_decoded)
- # print('after conv combine',x.shape)
-
-
- x = x / self.img_range + self.mean
-
- return x
-
- def flops(self):
- flops = 0
- h, w = self.patches_resolution
- flops += h * w * 3 * self.embed_dim * 9
- flops += self.patch_embed.flops()
- for layer in self.layers:
- flops += layer.flops()
- flops += h * w * 3 * self.embed_dim * self.embed_dim
- flops += self.upsample.flops()
- return flops
-
-
-if __name__ == '__main__':
-    from types import SimpleNamespace
-
-    upscale = 4
-    window_size = 8
-    height = (1024 // upscale // window_size + 1) * window_size
-    width = (720 // upscale // window_size + 1) * window_size
-    # The class defined in this file is SwinIR_RCAB; its constructor also expects an
-    # `opt` namespace of which only `opt.task` is read here, so a minimal stub is
-    # passed (any value other than 'segment' skips the segmentation head).
-    model = SwinIR_RCAB(
-        SimpleNamespace(task='sr'),
-        upscale=2,
- img_size=(height, width),
- window_size=window_size,
- img_range=1.,
- depths=[6, 6, 6, 6],
- embed_dim=60,
- num_heads=[6, 6, 6, 6],
- mlp_ratio=2,
- upsampler='pixelshuffledirect')
- print(model)
- print(height, width, model.flops() / 1e9)
-
- x = torch.randn((1, 3, height, width))
- x = model(x)
- print(x.shape)
-
diff --git a/spaces/chasemcdo/hf_localai/examples/rwkv/scripts/build.sh b/spaces/chasemcdo/hf_localai/examples/rwkv/scripts/build.sh
deleted file mode 100644
index 37720582a92ae108d347fb7e05b0d37b4d2586f1..0000000000000000000000000000000000000000
--- a/spaces/chasemcdo/hf_localai/examples/rwkv/scripts/build.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-set -ex
-
-URL="$1"
-OUT="$2"
-FILENAME=$(basename "$URL")
-
-wget -nc "$URL" -O "/build/$FILENAME"
-
-python3 /build/rwkv.cpp/rwkv/convert_pytorch_to_ggml.py "/build/$FILENAME" /build/float-model float16
-python3 /build/rwkv.cpp/rwkv/quantize.py /build/float-model "$OUT" Q4_0
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/ncnn_android_readme.md b/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/ncnn_android_readme.md
deleted file mode 100644
index b623071454b4e1b10fa311da5941aa2ab4a406a7..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/demo/ncnn_android_readme.md
+++ /dev/null
@@ -1 +0,0 @@
-../../demo/ncnn/android/README.md
\ No newline at end of file
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/logger.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/logger.py
deleted file mode 100644
index 1045a7b47c579041b3cef5c9a408a210caa5e64f..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/logger.py
+++ /dev/null
@@ -1,440 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Megvii Inc. All rights reserved.
-
-import inspect
-import os
-import sys
-from collections import defaultdict
-from loguru import logger
-
-import cv2
-import numpy as np
-
-import torch
-
-
-def get_caller_name(depth=0):
- """
- Args:
-        depth (int): Depth of the caller context; use 0 for the direct caller.
- Default value: 0.
-
- Returns:
- str: module name of the caller
- """
- # the following logic is a little bit faster than inspect.stack() logic
- frame = inspect.currentframe().f_back
- for _ in range(depth):
- frame = frame.f_back
-
- return frame.f_globals["__name__"]
-
-
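-def _demo_get_caller_name():
-    """Illustrative sketch (not part of the original file): with depth=0 the helper
-    resolves the module that invoked it, so calling it from inside this module
-    simply returns this module's __name__."""
-    assert get_caller_name(depth=0) == __name__
-
-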
-class StreamToLoguru:
- """
- stream object that redirects writes to a logger instance.
- """
-
- def __init__(self, level="INFO", caller_names=("apex", "pycocotools")):
- """
- Args:
- level(str): log level string of loguru. Default value: "INFO".
- caller_names(tuple): caller names of redirected module.
- Default value: (apex, pycocotools).
- """
- self.level = level
- self.linebuf = ""
- self.caller_names = caller_names
-
- def write(self, buf):
- full_name = get_caller_name(depth=1)
- module_name = full_name.rsplit(".", maxsplit=-1)[0]
- if module_name in self.caller_names:
- for line in buf.rstrip().splitlines():
- # use caller level log
- logger.opt(depth=2).log(self.level, line.rstrip())
- else:
- sys.__stdout__.write(buf)
-
- def flush(self):
- # flush is related with CPR(cursor position report) in terminal
- return sys.__stdout__.flush()
-
- def isatty(self):
- # when using colab, jax is installed by default and issue like
- # https://github.com/Megvii-BaseDetection/YOLOX/issues/1437 might be raised
- # due to missing attribute like`isatty`.
- # For more details, checked the following link:
- # https://github.com/google/jax/blob/10720258ea7fb5bde997dfa2f3f71135ab7a6733/jax/_src/pretty_printer.py#L54 # noqa
- return sys.__stdout__.isatty()
-
- def fileno(self):
- # To solve the issue when using debug tools like pdb
- return sys.__stdout__.fileno()
-
-
-def redirect_sys_output(log_level="INFO"):
- redirect_logger = StreamToLoguru(log_level)
- sys.stderr = redirect_logger
- sys.stdout = redirect_logger
-
-
-def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode="a"):
- """setup logger for training and testing.
- Args:
- save_dir(str): location to save log file
- distributed_rank(int): device rank when multi-gpu environment
- filename (string): log save name.
- mode(str): log file write mode, `append` or `override`. default is `a`.
-
- Return:
- logger instance.
- """
- loguru_format = (
- "{time:YYYY-MM-DD HH:mm:ss} | "
- "{level: <8} | "
- "{name}:{line} - {message}"
- )
-
- logger.remove()
- save_file = os.path.join(save_dir, filename)
- if mode == "o" and os.path.exists(save_file):
- os.remove(save_file)
- # only keep logger in rank0 process
- if distributed_rank == 0:
- logger.add(
- sys.stderr,
- format=loguru_format,
- level="INFO",
- enqueue=True,
- )
- logger.add(save_file)
-
- # redirect stdout/stderr to loguru
- redirect_sys_output("INFO")
-
-
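-def _example_setup_logger(save_dir="./YOLOX_outputs/example", local_rank=0):
-    """Illustrative sketch (not part of the original file; the paths are
-    hypothetical): a training entry point would typically call setup_logger once
-    per process. Only rank 0 writes to stderr and the log file, and output from the
-    modules named in StreamToLoguru.caller_names is rerouted through loguru."""
-    os.makedirs(save_dir, exist_ok=True)
-    setup_logger(save_dir, distributed_rank=local_rank, filename="train_log.txt", mode="a")
-    logger.info("logger initialized")
-
-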
-class WandbLogger(object):
- """
- Log training runs, datasets, models, and predictions to Weights & Biases.
- This logger sends information to W&B at wandb.ai.
- By default, this information includes hyperparameters,
- system configuration and metrics, model metrics,
- and basic data metrics and analyses.
-
- For more information, please refer to:
- https://docs.wandb.ai/guides/track
- https://docs.wandb.ai/guides/integrations/other/yolox
- """
- def __init__(self,
- project=None,
- name=None,
- id=None,
- entity=None,
- save_dir=None,
- config=None,
- val_dataset=None,
- num_eval_images=100,
- log_checkpoints=False,
- **kwargs):
- """
- Args:
- project (str): wandb project name.
- name (str): wandb run name.
- id (str): wandb run id.
- entity (str): wandb entity name.
- save_dir (str): save directory.
- config (dict): config dict.
- val_dataset (Dataset): validation dataset.
- num_eval_images (int): number of images from the validation set to log.
- log_checkpoints (bool): log checkpoints
- **kwargs: other kwargs.
-
- Usage:
- Any arguments for wandb.init can be provided on the command line using
- the prefix `wandb-`.
- Example
- ```
- python tools/train.py .... --logger wandb wandb-project \
- wandb-name \
- wandb-id \
- wandb-save_dir \
-                wandb-num_eval_images \
- wandb-log_checkpoints
- ```
- The val_dataset argument is not open to the command line.
- """
- try:
- import wandb
- self.wandb = wandb
- except ModuleNotFoundError:
- raise ModuleNotFoundError(
- "wandb is not installed."
- "Please install wandb using pip install wandb"
- )
-
- from yolox.data.datasets import VOCDetection
-
- self.project = project
- self.name = name
- self.id = id
- self.save_dir = save_dir
- self.config = config
- self.kwargs = kwargs
- self.entity = entity
- self._run = None
- self.val_artifact = None
- if num_eval_images == -1:
- self.num_log_images = len(val_dataset)
- else:
- self.num_log_images = min(num_eval_images, len(val_dataset))
- self.log_checkpoints = (log_checkpoints == "True" or log_checkpoints == "true")
- self._wandb_init = dict(
- project=self.project,
- name=self.name,
- id=self.id,
- entity=self.entity,
- dir=self.save_dir,
- resume="allow"
- )
- self._wandb_init.update(**kwargs)
-
- _ = self.run
-
- if self.config:
- self.run.config.update(self.config)
- self.run.define_metric("train/epoch")
- self.run.define_metric("val/*", step_metric="train/epoch")
- self.run.define_metric("train/step")
- self.run.define_metric("train/*", step_metric="train/step")
-
- self.voc_dataset = VOCDetection
-
- if val_dataset and self.num_log_images != 0:
- self.val_dataset = val_dataset
- self.cats = val_dataset.cats
- self.id_to_class = {
- cls['id']: cls['name'] for cls in self.cats
- }
- self._log_validation_set(val_dataset)
-
- @property
- def run(self):
- if self._run is None:
- if self.wandb.run is not None:
- logger.info(
- "There is a wandb run already in progress "
- "and newly created instances of `WandbLogger` will reuse"
- " this run. If this is not desired, call `wandb.finish()`"
- "before instantiating `WandbLogger`."
- )
- self._run = self.wandb.run
- else:
- self._run = self.wandb.init(**self._wandb_init)
- return self._run
-
- def _log_validation_set(self, val_dataset):
- """
- Log validation set to wandb.
-
- Args:
- val_dataset (Dataset): validation dataset.
- """
- if self.val_artifact is None:
- self.val_artifact = self.wandb.Artifact(name="validation_images", type="dataset")
- self.val_table = self.wandb.Table(columns=["id", "input"])
-
- for i in range(self.num_log_images):
- data_point = val_dataset[i]
- img = data_point[0]
- id = data_point[3]
- img = np.transpose(img, (1, 2, 0))
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
- if isinstance(id, torch.Tensor):
- id = id.item()
-
- self.val_table.add_data(
- id,
- self.wandb.Image(img)
- )
-
- self.val_artifact.add(self.val_table, "validation_images_table")
- self.run.use_artifact(self.val_artifact)
- self.val_artifact.wait()
-
- def _convert_prediction_format(self, predictions):
- image_wise_data = defaultdict(int)
-
- for key, val in predictions.items():
- img_id = key
-
- try:
- bboxes, cls, scores = val
- except KeyError:
- bboxes, cls, scores = val["bboxes"], val["categories"], val["scores"]
-
- # These store information of actual bounding boxes i.e. the ones which are not None
- act_box = []
- act_scores = []
- act_cls = []
-
- if bboxes is not None:
- for box, classes, score in zip(bboxes, cls, scores):
- if box is None or score is None or classes is None:
- continue
- act_box.append(box)
- act_scores.append(score)
- act_cls.append(classes)
-
- image_wise_data.update({
- int(img_id): {
- "bboxes": [box.numpy().tolist() for box in act_box],
- "scores": [score.numpy().item() for score in act_scores],
- "categories": [
- self.val_dataset.class_ids[int(act_cls[ind])]
- for ind in range(len(act_box))
- ],
- }
- })
-
- return image_wise_data
-
- def log_metrics(self, metrics, step=None):
- """
- Args:
- metrics (dict): metrics dict.
- step (int): step number.
- """
-
- for k, v in metrics.items():
- if isinstance(v, torch.Tensor):
- metrics[k] = v.item()
-
- if step is not None:
- metrics.update({"train/step": step})
- self.run.log(metrics)
- else:
- self.run.log(metrics)
-
- def log_images(self, predictions):
- if len(predictions) == 0 or self.val_artifact is None or self.num_log_images == 0:
- return
-
- table_ref = self.val_artifact.get("validation_images_table")
-
- columns = ["id", "predicted"]
- for cls in self.cats:
- columns.append(cls["name"])
-
- if isinstance(self.val_dataset, self.voc_dataset):
- predictions = self._convert_prediction_format(predictions)
-
- result_table = self.wandb.Table(columns=columns)
-
- for idx, val in table_ref.iterrows():
-
- avg_scores = defaultdict(int)
- num_occurrences = defaultdict(int)
-
- id = val[0]
- if isinstance(id, list):
- id = id[0]
-
- if id in predictions:
- prediction = predictions[id]
- boxes = []
- for i in range(len(prediction["bboxes"])):
- bbox = prediction["bboxes"][i]
- x0 = bbox[0]
- y0 = bbox[1]
- x1 = bbox[2]
- y1 = bbox[3]
- box = {
- "position": {
- "minX": min(x0, x1),
- "minY": min(y0, y1),
- "maxX": max(x0, x1),
- "maxY": max(y0, y1)
- },
- "class_id": prediction["categories"][i],
- "domain": "pixel"
- }
- avg_scores[
- self.id_to_class[prediction["categories"][i]]
- ] += prediction["scores"][i]
- num_occurrences[self.id_to_class[prediction["categories"][i]]] += 1
- boxes.append(box)
- else:
- boxes = []
- average_class_score = []
- for cls in self.cats:
- if cls["name"] not in num_occurrences:
- score = 0
- else:
- score = avg_scores[cls["name"]] / num_occurrences[cls["name"]]
- average_class_score.append(score)
- result_table.add_data(
- idx,
- self.wandb.Image(val[1], boxes={
- "prediction": {
- "box_data": boxes,
- "class_labels": self.id_to_class
- }
- }
- ),
- *average_class_score
- )
-
- self.wandb.log({"val_results/result_table": result_table})
-
- def save_checkpoint(self, save_dir, model_name, is_best, metadata=None):
- """
- Args:
- save_dir (str): save directory.
- model_name (str): model name.
- is_best (bool): whether the model is the best model.
- metadata (dict): metadata to save corresponding to the checkpoint.
- """
-
- if not self.log_checkpoints:
- return
-
- if "epoch" in metadata:
- epoch = metadata["epoch"]
- else:
- epoch = None
-
- filename = os.path.join(save_dir, model_name + "_ckpt.pth")
- artifact = self.wandb.Artifact(
- name=f"run_{self.run.id}_model",
- type="model",
- metadata=metadata
- )
- artifact.add_file(filename, name="model_ckpt.pth")
-
- aliases = ["latest"]
-
- if is_best:
- aliases.append("best")
-
- if epoch:
- aliases.append(f"epoch-{epoch}")
-
- self.run.log_artifact(artifact, aliases=aliases)
-
- def finish(self):
- self.run.finish()
-
- @classmethod
- def initialize_wandb_logger(cls, args, exp, val_dataset):
- wandb_params = dict()
- prefix = "wandb-"
- for k, v in zip(args.opts[0::2], args.opts[1::2]):
- if k.startswith("wandb-"):
- try:
- wandb_params.update({k[len(prefix):]: int(v)})
- except ValueError:
- wandb_params.update({k[len(prefix):]: v})
-
- return cls(config=vars(exp), val_dataset=val_dataset, **wandb_params)
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh b/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh
deleted file mode 100644
index eb9671d015271e470bd31710ec86a04b3bcff453..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-python run_asr.py \
---output_dir="./wav2vec2-large-lv60-timit-asr" \
---num_train_epochs="30" \
---per_device_train_batch_size="2" \
---per_device_eval_batch_size="2" \
---gradient_accumulation_steps="4" \
---evaluation_strategy="steps" \
---save_steps="500" \
---eval_steps="100" \
---logging_steps="50" \
---learning_rate="5e-4" \
---warmup_steps="3000" \
---model_name_or_path="facebook/wav2vec2-large-lv60" \
---fp16 \
---dataset_name="timit_asr" \
---train_split_name="train" \
---validation_split_name="test" \
---orthography="timit" \
---preprocessing_num_workers="$(nproc)" \
---group_by_length \
---freeze_feature_extractor \
---verbose_logging \
diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/modeling_tf_utils.py b/spaces/chendl/compositional_test/transformers/src/transformers/modeling_tf_utils.py
deleted file mode 100644
index 756ab50467b4103a4536b05f83b580f096e04e4e..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/src/transformers/modeling_tf_utils.py
+++ /dev/null
@@ -1,3323 +0,0 @@
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
-# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TF general model utils."""
-
-import functools
-import gc
-import inspect
-import json
-import os
-import pickle
-import re
-import warnings
-from collections.abc import Mapping
-from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
-
-import h5py
-import numpy as np
-import tensorflow as tf
-from huggingface_hub import Repository, list_repo_files
-from packaging.version import parse
-
-from . import DataCollatorWithPadding, DefaultDataCollator
-from .activations_tf import get_tf_activation
-from .configuration_utils import PretrainedConfig
-from .dynamic_module_utils import custom_object_save
-from .generation import GenerationConfig, TFGenerationMixin
-from .tf_utils import shape_list
-from .utils import (
- DUMMY_INPUTS,
- SAFE_WEIGHTS_INDEX_NAME,
- SAFE_WEIGHTS_NAME,
- TF2_WEIGHTS_INDEX_NAME,
- TF2_WEIGHTS_NAME,
- TF_WEIGHTS_NAME,
- WEIGHTS_INDEX_NAME,
- WEIGHTS_NAME,
- ModelOutput,
- PushToHubMixin,
- cached_file,
- download_url,
- find_labels,
- has_file,
- is_offline_mode,
- is_remote_url,
- is_safetensors_available,
- logging,
- requires_backends,
- working_or_temp_dir,
-)
-from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
-
-
-if parse(tf.__version__) >= parse("2.11.0"):
- from keras import backend as K
- from keras.engine import data_adapter
- from keras.engine.keras_tensor import KerasTensor
- from keras.saving.legacy import hdf5_format
-else:
- from tensorflow.python.keras import backend as K
- from tensorflow.python.keras.engine import data_adapter
- from tensorflow.python.keras.engine.keras_tensor import KerasTensor
- from tensorflow.python.keras.saving import hdf5_format
-
-
-if is_safetensors_available():
- from safetensors import safe_open
- from safetensors.tensorflow import load_file as safe_load_file
- from safetensors.tensorflow import save_file as safe_save_file
-
-if TYPE_CHECKING:
- from . import PreTrainedTokenizerBase
-
-
-logger = logging.get_logger(__name__)
-tf_logger = tf.get_logger()
-
-TFModelInputType = Union[
- List[tf.Tensor],
- List[np.ndarray],
- List[KerasTensor],
- Dict[str, tf.Tensor],
- Dict[str, np.ndarray],
- Dict[str, KerasTensor],
- tf.Tensor,
- np.ndarray,
- KerasTensor,
-]
-
-
-def dummy_loss(y_true, y_pred):
- if y_pred.shape.rank <= 1:
- return y_pred
- else:
- reduction_axes = list(range(1, y_pred.shape.rank))
- return tf.reduce_mean(y_pred, axis=reduction_axes)
-
-
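-def _demo_dummy_loss():
-    """Illustrative sketch (not part of the original file): dummy_loss ignores y_true
-    and just averages the model-computed loss tensor over every axis except the
-    batch axis, leaving one value per sample for Keras to reduce."""
-    per_token_loss = tf.constant([[1.0, 3.0], [2.0, 2.0]])
-    out = dummy_loss(None, per_token_loss)
-    assert out.shape == (2,) and float(out[0]) == 2.0
-
-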
-class TFModelUtilsMixin:
- """
- A few utilities for `tf.keras.Model`, to be used as a mixin.
- """
-
- def num_parameters(self, only_trainable: bool = False) -> int:
- """
- Get the number of (optionally, trainable) parameters in the model.
-
- Args:
- only_trainable (`bool`, *optional*, defaults to `False`):
- Whether or not to return only the number of trainable parameters
-
- Returns:
- `int`: The number of parameters.
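-
-        Example (an illustrative sketch; `model` stands for any instantiated subclass):
-
-        ```py
-        >>> total = model.num_parameters()
-        >>> trainable_only = model.num_parameters(only_trainable=True)
-        ```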
- """
- if only_trainable:
- return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
- else:
- return self.count_params()
-
-
-def keras_serializable(cls):
- """
- Decorate a Keras Layer class to support Keras serialization.
-
- This is done by:
-
-    1. Adding a `config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization
-       time).
-    2. Wrapping `__init__` to accept that `config` dict (passed by Keras at deserialization time) and convert it to
-       a config object for the actual layer initializer.
- 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
- need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`.
-
- Args:
-        cls (a `tf.keras.layers.Layer` subclass):
-            Typically a `TF.MainLayer` class in this project, which in general must accept a `config` argument to
-            its initializer.
-
- Returns:
- The same class object, with modifications for Keras deserialization.
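-
-    Example (a minimal sketch; `TFMyMainLayer` and `MyConfig` are hypothetical names):
-
-    ```py
-    >>> @keras_serializable
-    ... class TFMyMainLayer(tf.keras.layers.Layer):
-    ...     config_class = MyConfig
-    ...
-    ...     def __init__(self, config, **kwargs):
-    ...         super().__init__(**kwargs)
-    ...         self.config = config
-    ```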
- """
- initializer = cls.__init__
-
- config_class = getattr(cls, "config_class", None)
- if config_class is None:
- raise AttributeError("Must set `config_class` to use @keras_serializable")
-
- @functools.wraps(initializer)
- def wrapped_init(self, *args, **kwargs):
- config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
-
- if isinstance(config, dict):
- config = config_class.from_dict(config)
- initializer(self, config, *args, **kwargs)
- elif isinstance(config, PretrainedConfig):
- if len(args) > 0:
- initializer(self, *args, **kwargs)
- else:
- initializer(self, config, *args, **kwargs)
- else:
- raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
-
- self._config = config
- self._kwargs = kwargs
-
- cls.__init__ = wrapped_init
-
- if not hasattr(cls, "get_config"):
- raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
- if hasattr(cls.get_config, "_is_default"):
-
- def get_config(self):
- cfg = super(cls, self).get_config()
- cfg["config"] = self._config.to_dict()
- cfg.update(self._kwargs)
- return cfg
-
- cls.get_config = get_config
-
- cls._keras_serializable = True
- if hasattr(tf.keras.utils, "register_keras_serializable"):
- cls = tf.keras.utils.register_keras_serializable()(cls)
- return cls
-
-
-class TFCausalLanguageModelingLoss:
- """
- Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
-
-
-
- Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
-
-
- """
-
- def hf_compute_loss(self, labels, logits):
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
- if self.config.tf_legacy_loss:
- # make sure only labels that are not equal to -100 affect the loss
- active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
- reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
- labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
- return loss_fn(labels, reduced_logits)
-
- # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
- unmasked_loss = loss_fn(tf.nn.relu(labels), logits)
- # make sure only labels that are not equal to -100 affect the loss
- loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype)
- masked_loss = unmasked_loss * loss_mask
- reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask)
- return tf.reshape(reduced_masked_loss, (1,))
-
-
-class TFQuestionAnsweringLoss:
- """
- Loss function suitable for question answering.
- """
-
- def hf_compute_loss(self, labels, logits):
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
- start_loss = loss_fn(labels["start_position"], logits[0])
- end_loss = loss_fn(labels["end_position"], logits[1])
-
- return (start_loss + end_loss) / 2.0
-
-
-class TFTokenClassificationLoss:
- """
- Loss function suitable for token classification.
-
-
-
- Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
-
-
- """
-
- def hf_compute_loss(self, labels, logits):
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
- if tf.executing_eagerly(): # Data-dependent conditionals are forbidden in XLA
- if tf.math.reduce_any(labels == -1):
- tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
-
- if self.config.tf_legacy_loss:
- # make sure only labels that are not equal to -100
- # are taken into account as loss
- if tf.math.reduce_any(labels == -1):
- tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
- active_loss = tf.reshape(labels, (-1,)) != -1
- else:
- active_loss = tf.reshape(labels, (-1,)) != -100
- reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
- labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
-
- return loss_fn(labels, reduced_logits)
-
- # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
- unmasked_loss = loss_fn(tf.nn.relu(labels), logits)
- # make sure only labels that are not equal to -100 or -1
- # are taken into account as loss
- loss_mask = tf.cast(labels >= 0, dtype=unmasked_loss.dtype)
- # Avoid possible division by zero later
- # Masked positions will have a loss of NaN because -100 and -1 are not valid labels
- masked_loss = unmasked_loss * loss_mask
- reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask)
- return tf.reshape(reduced_masked_loss, (1,))
-
-
-class TFSequenceClassificationLoss:
- """
- Loss function suitable for sequence classification.
- """
-
- def hf_compute_loss(self, labels, logits):
- if logits.shape.rank == 1 or logits.shape[1] == 1:
- loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
- if labels.shape.rank == 1:
- # MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that
- labels = tf.expand_dims(labels, axis=-1)
- else:
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
-
- return loss_fn(labels, logits)
-
-
-class TFMultipleChoiceLoss:
- """Loss function suitable for multiple choice tasks."""
-
- def hf_compute_loss(self, labels, logits):
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
- return loss_fn(labels, logits)
-
-
-class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
- """
- Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.
-
-
-
- Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
-
-
- """
-
-
-class TFNextSentencePredictionLoss:
- """
- Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.
-
-
-
- Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
-
-
- """
-
- def hf_compute_loss(self, labels, logits):
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
- from_logits=True, reduction=tf.keras.losses.Reduction.NONE
- )
- if self.config.tf_legacy_loss:
- # make sure only labels that are not equal to -100
- # are taken into account as loss
- next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
- next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
- next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
-
- return loss_fn(next_sentence_label, next_sentence_reduced_logits)
-
- # make sure only labels that are not equal to -100
- # are taken into account as loss
-
- # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
- unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits)
- ns_loss_mask = tf.cast(labels != -100, dtype=unmasked_ns_loss.dtype)
- # Just zero out samples where label is -100, no reduction
- masked_ns_loss = unmasked_ns_loss * ns_loss_mask
-
- return masked_ns_loss
-
-
-def booleans_processing(config, **kwargs):
- """
- Process the input booleans of each model.
-
- Args:
- config ([`PretrainedConfig`]):
- The config of the running model.
- **kwargs:
- The boolean parameters
-
- Returns:
- A dictionary with the proper values for each boolean
- """
- final_booleans = {}
-
- # Pure conv models (such as ConvNext) do not have `output_attentions`. If the signature has
- # `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`)
- if "output_attentions" in kwargs:
- final_booleans["output_attentions"] = (
- kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
- )
- final_booleans["output_hidden_states"] = (
- kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states
- )
- final_booleans["return_dict"] = kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
-
- if "use_cache" in kwargs:
- final_booleans["use_cache"] = (
- kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
- )
- return final_booleans
-
-
-def unpack_inputs(func):
- """
- Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables
- downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input
- (common case in Keras).
-
- Args:
- func (`callable`):
- The callable function of the TensorFlow model.
-
-
- Returns:
- A callable that wraps the original `func` with the behavior described above.
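-
-    Example (a minimal sketch; `TFMyModel` is a hypothetical `TFPreTrainedModel` subclass):
-
-    ```py
-    >>> class TFMyModel(TFPreTrainedModel):
-    ...     @unpack_inputs
-    ...     def call(self, input_ids=None, attention_mask=None, training=False):
-    ...         # `input_ids` and `attention_mask` arrive as separate keyword arguments here,
-    ...         # even if the caller packed them into a single dict as the first argument.
-    ...         ...
-    ```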
- """
-
- original_signature = inspect.signature(func)
-
- @functools.wraps(func)
- def run_call_with_unpacked_inputs(self, *args, **kwargs):
- # isolates the actual `**kwargs` for the decorated function
- kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)}
- fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call}
- fn_args_and_kwargs.update({"kwargs_call": kwargs_call})
-
- # move any arg into kwargs, if they exist
- fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args)))
-
- # Encoder Decoder models delegate the application of the configuration options to their inner models.
- if "EncoderDecoder" in self.__class__.__name__:
- config = None
- else:
- config = self.config
-
- unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs)
- return func(self, **unpacked_inputs)
-
- # Keras enforces the first layer argument to be passed, and checks it through `inspect.getfullargspec()`. This
- # function does not follow wrapper chains (i.e. ignores `functools.wraps()`), meaning that without the line below
- # Keras would attempt to check the first argument against the literal signature of the wrapper.
- run_call_with_unpacked_inputs.__signature__ = original_signature
-
- return run_call_with_unpacked_inputs
-
-
-def input_processing(func, config, **kwargs):
- """
-    Process the inputs of each TensorFlow model, including the booleans. In the case of a list of symbolic inputs,
-    each input has to be named according to its parameter name, e.g. `input_ids = tf.keras.Input(shape=(128,),
-    dtype='int32', name="input_ids")`, otherwise the order of the tensors is not guaranteed during training.
-
- Args:
- func (`callable`):
- The callable function of the TensorFlow model.
- config ([`PretrainedConfig`]):
- The config of the running model.
- **kwargs:
- The inputs of the model.
-
- Returns:
-        A dictionary mapping each argument name expected by `func` to its processed value.
- """
- signature = dict(inspect.signature(func).parameters)
- has_kwargs = bool(signature.pop("kwargs", None))
- signature.pop("self", None)
- parameter_names = list(signature.keys())
- main_input_name = parameter_names[0]
- main_input = kwargs.pop(main_input_name, None)
- output = {}
- allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)
-
- if "inputs" in kwargs["kwargs_call"]:
- warnings.warn(
- "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
- FutureWarning,
- )
-
- output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
-
- if "decoder_cached_states" in kwargs["kwargs_call"]:
- warnings.warn(
- "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use"
- " `past_key_values` instead.",
- FutureWarning,
- )
- output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
-
- if "past" in kwargs["kwargs_call"] and "past_key_values" in parameter_names:
- warnings.warn(
- "The `past` argument is deprecated and will be removed in a future version, use `past_key_values`"
- " instead.",
- FutureWarning,
- )
- kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past")
- elif "past_key_values" in kwargs["kwargs_call"] and "past" in parameter_names:
- kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values")
-
- if has_kwargs:
- output["kwargs"] = kwargs.pop("kwargs_call", {})
- else:
- if len(kwargs["kwargs_call"]) > 0:
- raise ValueError(
- "The following keyword arguments are not supported by this model:"
- f" {list(kwargs['kwargs_call'].keys())}."
- )
- kwargs.pop("kwargs_call")
-
- for k, v in kwargs.items():
- if isinstance(v, allowed_types) or v is None:
- output[k] = v
- else:
- raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
-
- if isinstance(main_input, (tuple, list)):
- for i, input in enumerate(main_input):
-            # EagerTensors don't support the .name property, so we only handle symbolic (non-eager) Tensors here
- if type(input) == tf.Tensor:
-                # Tensor names always have the pattern `name:id`, so we check only the
- # `name` part
- tensor_name = input.name.split(":")[0]
-
- if tensor_name in parameter_names:
- output[tensor_name] = input
- else:
- output[parameter_names[i]] = input
- elif isinstance(input, allowed_types) or input is None:
- output[parameter_names[i]] = input
- else:
- raise ValueError(
-                    f"Data of type {type(input)} is not allowed; only {allowed_types} are accepted for"
- f" {parameter_names[i]}."
- )
- elif isinstance(main_input, Mapping):
- if "inputs" in main_input:
- warnings.warn(
- "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids`"
- " instead.",
- FutureWarning,
- )
-
- output["input_ids"] = main_input.pop("inputs")
-
- if "decoder_cached_states" in main_input:
- warnings.warn(
- "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use"
- " `past_key_values` instead.",
- FutureWarning,
- )
- output["past_key_values"] = main_input.pop("decoder_cached_states")
-
- for k, v in dict(main_input).items():
- if isinstance(v, allowed_types) or v is None:
- output[k] = v
- elif k not in parameter_names and "args" not in parameter_names:
- logger.warning(
-                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
- )
- continue
- else:
-                raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")
- else:
- if isinstance(main_input, (tf.Tensor, KerasTensor)) or main_input is None:
- output[main_input_name] = main_input
- else:
- raise ValueError(
-                f"Data of type {type(main_input)} is not allowed; only {allowed_types} are accepted for"
- f" {main_input_name}."
- )
-
- # Populates any unspecified argument with their default value, according to the signature.
- for name in parameter_names:
- if name not in list(output.keys()) and name != "args":
- output[name] = kwargs.pop(name, signature[name].default)
-
- # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
- # So to respect the proper output we have to add this exception
- if "args" in output:
- if output["args"] is not None and type(output["args"]) == tf.Tensor:
- tensor_name = output["args"].name.split(":")[0]
- output[tensor_name] = output["args"]
- else:
- # `args` in this case is always the first parameter, then `input_ids`
- output["input_ids"] = output["args"]
-
- del output["args"]
-
- if "kwargs" in output:
- del output["kwargs"]
-
- cast_output = {}
- for key, val in output.items():
- if isinstance(val, tf.Tensor) and val.dtype == tf.int64:
- cast_output[key] = tf.cast(val, tf.int32)
- elif isinstance(val, np.ndarray) and val.dtype == np.int64:
- cast_output[key] = val.astype(np.int32)
- else:
- cast_output[key] = val
-
- output = cast_output
- del cast_output
-
- if config is not None:
- boolean_dict = {
- k: v
- for k, v in output.items()
- if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
- }
-
- output.update(
- booleans_processing(
- config=config,
- **boolean_dict,
- )
- )
-
- return output
-
-
-def dtype_byte_size(dtype):
- """
- Returns the size (in bytes) occupied by one parameter of type `dtype`.
-
- Example:
-
- ```py
- >>> dtype_byte_size(tf.float32)
- 4
- ```
- """
- if dtype == tf.bool:
- return 1 / 8
- bit_search = re.search(r"[^\d](\d+)$", dtype.name)
- if bit_search is None:
- raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
- bit_size = int(bit_search.groups()[0])
- return bit_size // 8
-
-
-def format_weight_name(name, _prefix=None):
- if "model." not in name and len(name.split("/")) > 1:
- name = "/".join(name.split("/")[1:])
- if _prefix is not None:
- name = _prefix + "/" + name
- return name
-
-
-def tf_shard_checkpoint(weights, max_shard_size="10GB"):
- """
- Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
- given size.
-
- The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
- optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
- limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
- [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
-
-
-
-    If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint, which
-    will have a size greater than `max_shard_size`.
-
-
-
- Args:
-        weights (`List[tf.ResourceVariable]`): The list of `tf.ResourceVariable`s of a model to save.
- max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
- The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
- (like `"5MB"`).
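-
-    Example (an illustrative sketch; `model` stands for any instantiated TF model):
-
-    ```py
-    >>> shards, index = tf_shard_checkpoint(model.weights, max_shard_size="200MB")
-    >>> # `shards` maps shard file names (e.g. "tf_model-00001-of-00002.h5") to lists of weights,
-    >>> # and `index` is `None` when everything fits into a single shard.
-    ```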
- """
- max_shard_size = convert_file_size_to_int(max_shard_size)
-
- sharded_state_dicts = []
- current_block = []
- current_block_size = 0
- total_size = 0
-
- for item in weights:
- weight_size = item.numpy().size * dtype_byte_size(item.dtype)
-
-        # If adding this weight would tip the current shard over the maximal size, we split.
- if current_block_size + weight_size > max_shard_size:
- sharded_state_dicts.append(current_block)
- current_block = []
- current_block_size = 0
-
- current_block.append(item)
- current_block_size += weight_size
- total_size += weight_size
-
- # Add the last block
- sharded_state_dicts.append(current_block)
-
- # If we only have one shard, we return it
- if len(sharded_state_dicts) == 1:
- return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
-
- # Otherwise, let's build the index
- weight_map = {}
- shards = {}
- for idx, shard in enumerate(sharded_state_dicts):
- shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
- shards[shard_file] = shard
- for weight in shard:
- weight_name = weight.name
- weight_map[weight_name] = shard_file
-
- # Add the metadata
- metadata = {"total_size": total_size}
- index = {"metadata": metadata, "weight_map": weight_map}
- return shards, index
-
-
-def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):
- """
- This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load
-    the TF weights from the shard files according to their names and shapes.
-
- This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
- loaded in the model.
-
- Args:
- model (`tf.keras.models.Model`): The model in which to load the checkpoint.
-        shard_files (`List[str]`): A list containing the sharded checkpoint file names.
-        ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
-            Whether or not to ignore weights whose sizes mismatch between the checkpoint and the model.
-        strict (`bool`, *optional*, defaults to `False`):
-            Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
-
- Returns:
- Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
- mismatched layers.
- """
-
- # Load the index
- unexpected_keys = set()
- saved_keys = set()
- mismatched_keys = set()
-
- # Since TF adds the name of the class to its weights, and uses the index and not the name of the layer to load
- # the weight, we have to get rid of the first prefix of the name of the layer.
- model_keys = set()
- model_layer_map = {}
- for i, k in enumerate(model.weights):
- layer_name = k.name
- if _prefix is not None and layer_name.startswith(_prefix):
- layer_name = layer_name[len(_prefix) :]
- layer_name = layer_name.lstrip("/")
- if not ("model." in layer_name or len(layer_name.split("/")) == 1):
- layer_name = "/".join(layer_name.split("/")[1:])
- model_keys.add(layer_name)
- model_layer_map[layer_name] = i
-
- for shard_file in shard_files:
- saved_weight_names_set, unexpected_keys_set, mismatched_keys_set = load_tf_shard(
- model,
- model_layer_map,
- shard_file,
- ignore_mismatched_sizes=ignore_mismatched_sizes,
- _prefix=_prefix,
- )
- saved_keys.update(saved_weight_names_set)
- unexpected_keys.update(unexpected_keys_set)
- mismatched_keys.update(mismatched_keys_set)
- gc.collect()
-
- missing_keys = model_keys - saved_keys
- if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
- error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
- if len(missing_keys) > 0:
- str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
- error_message += f"\nMissing key(s): {str_missing_keys}."
- if len(unexpected_keys) > 0:
- str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
-            error_message += f"\nUnexpected key(s): {str_unexpected_keys}."
- raise RuntimeError(error_message)
-
- return missing_keys, unexpected_keys, mismatched_keys
-
-
-def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
- """
- Loads a shard from a sharded checkpoint file. Handles the missing keys and unexpected keys.
-
- Args:
- model (`tf.keras.models.Model`): Model in which the weights are loaded
- model_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model.
- resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded
- ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys
-
- Returns:
-        Three sets: the names of the layers that were found and successfully restored (from the shard file), the
-        unexpected layers, and the mismatched layers.
- """
- saved_weight_names_set = set()
- saved_weights = {}
- mismatched_keys = set()
- unexpected_keys = set()
- # Read the H5 file
- try:
- with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file:
- # Retrieve the name of each layer from the H5 file
- saved_h5_model_layers_name = set(
- hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names")
- )
- weight_value_tuples = []
-
- # Compute missing and unexpected sub layers
- # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
- for layer_name in saved_h5_model_layers_name:
- h5_layer_object = sharded_checkpoint_file[layer_name]
- saved_weights[layer_name] = np.asarray(h5_layer_object)
-
- saved_weight_names_set.add(layer_name)
-
- if layer_name not in model_layer_map:
- unexpected_keys.add(layer_name)
- else:
- symbolic_weight = model.weights[model_layer_map[layer_name]]
-
- saved_weight_value = saved_weights[layer_name]
- # If the current weight is found
- if saved_weight_value is not None:
- # Check if the shape of the current weight and the one from the H5 file are different
- if K.int_shape(symbolic_weight) != saved_weight_value.shape:
-                            # If yes we reshape the weight from the H5 file to match the current weight
- # If the two shapes are not compatible we raise an issue
- try:
- array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
- except ValueError as e:
- if ignore_mismatched_sizes:
- mismatched_keys.add(
- (layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
- )
- continue
- else:
- raise e
- else:
- array = saved_weight_value
-
- # We create the tuple that will be loaded and add it to the final list
- weight_value_tuples.append((symbolic_weight, array))
-
- K.batch_set_value(weight_value_tuples)
-
- return saved_weight_names_set, unexpected_keys, mismatched_keys
-
- except Exception as e:
- try:
- with open(resolved_archive_file) as f:
- if f.read().startswith("version"):
- raise OSError(
- "You seem to have cloned a repository without having git-lfs installed. Please install "
- "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
- "you cloned."
- )
- else:
- raise ValueError(
- f"Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained"
- " model. Make sure you have saved the model properly."
- ) from e
- except (UnicodeDecodeError, ValueError):
- raise OSError(
- f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' "
- f"at '{resolved_archive_file}'. "
-                "If you tried to load a TF model from a sharded checkpoint, you should try converting the model "
-                "by loading it in PyTorch and saving it locally. A conversion script should be released soon."
- )
-
-
-def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
- """
-    Detect missing and unexpected layers and load the TF weights from the checkpoint file according to their names
-    and shapes.
-
- Args:
- model (`tf.keras.models.Model`):
- The model to load the weights into.
- resolved_archive_file (`str`):
- The location of the H5 file.
- ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
-            Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.
-
- Returns:
- Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
- mismatched layers.
- """
- if resolved_archive_file.endswith(".safetensors"):
- load_function = load_tf_weights_from_safetensors
- else:
- load_function = load_tf_weights_from_h5
-
- return load_function(
- model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix
- )
-
-
-def load_tf_weights_from_h5(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
- mismatched_layers = []
-
- # Read the H5 file
- with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file:
- # Retrieve the name of each layer from the H5 file
- saved_h5_model_layers_name = set(
- hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names")
- )
-
- # Find the missing layers from the high level list of layers
- missing_layers = list({layer.name for layer in model.layers} - saved_h5_model_layers_name)
-
- # Find the unexpected layers from the high level list of layers
- unexpected_layers = list(saved_h5_model_layers_name - {layer.name for layer in model.layers})
- saved_weight_names_set = set()
- symbolic_weights_names = set()
- weight_value_tuples = []
-
- # Compute missing and unexpected sub layers
- # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
- for layer in model.layers:
- # if layer_name from the H5 file belongs to the layers from the instantiated model
- if layer.name in saved_h5_model_layers_name:
- # Get the H5 layer object from its name
- h5_layer_object = sharded_checkpoint_file[layer.name]
- # Get all the weights as a list from the layer object
- symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
- saved_weights = {}
-
- # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
- # And a set with only the names
- for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
- # TF names always start with the model name so we ignore it
- name = "/".join(weight_name.split("/")[1:])
-
- if _prefix is not None:
- name = _prefix + "/" + name
-
- saved_weights[name] = np.asarray(h5_layer_object[weight_name])
-
- # Add the updated name to the final list for computing missing/unexpected values
- saved_weight_names_set.add(name)
-
- # Loop over each weights from the instantiated model and compare with the weights from the H5 file
- for symbolic_weight in symbolic_weights:
- # TF names always start with the model name so we ignore it
- if _prefix is not None:
-                        delimiter = len(_prefix.split("/"))
-                        symbolic_weight_name = "/".join(
-                            symbolic_weight.name.split("/")[:delimiter]
-                            + symbolic_weight.name.split("/")[delimiter + 1 :]
- )
- else:
- symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
-
- # here we check if the current weight is among the weights from the H5 file
- # If yes, get the weight_value of the corresponding weight from the H5 file
- # If not, make the value to None
- saved_weight_value = saved_weights.get(symbolic_weight_name, None)
-
- # Retrocompatibility patch: some embeddings are stored with the weights name (e.g. Bart's
-                    # `model.shared/embeddings:0` are stored as `model.shared/weight:0`)
- if saved_weight_value is None and symbolic_weight_name.endswith("embeddings:0"):
- symbolic_weight_name = symbolic_weight_name[:-12] + "weight:0"
- saved_weight_value = saved_weights.get(symbolic_weight_name, None)
-
- # Add the updated name to the final list for computing missing/unexpected values
- symbolic_weights_names.add(symbolic_weight_name)
-
- # If the current weight is found
- if saved_weight_value is not None:
- # Check if the shape of the current weight and the one from the H5 file are different
- if K.int_shape(symbolic_weight) != saved_weight_value.shape:
-                            # If yes we reshape the weight from the H5 file to match the current weight
- # If the two shapes are not compatible we raise an issue
- try:
- array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
- except ValueError as e:
- if ignore_mismatched_sizes:
- mismatched_layers.append(
- (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
- )
- continue
- else:
- raise e
- else:
- array = saved_weight_value
-
- # We create the tuple that will be loaded and add it to the final list
- weight_value_tuples.append((symbolic_weight, array))
-
- # Load all the weights
- K.batch_set_value(weight_value_tuples)
-
- # Compute the missing and unexpected layers
- missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
- unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
-
- return missing_layers, unexpected_layers, mismatched_layers
-
-
-def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
- # Read the safetensors file
- state_dict = safe_load_file(resolved_archive_file)
-
- weight_value_tuples = []
- mismatched_layers = []
-
- weight_names = [format_weight_name(w.name, _prefix=_prefix) for w in model.weights]
- loaded_weight_names = list(state_dict.keys())
-
- # Find the missing layers from the high level list of layers
- missing_layers = list(set(weight_names) - set(loaded_weight_names))
- # Find the unexpected layers from the high level list of layers
- unexpected_layers = list(set(loaded_weight_names) - set(weight_names))
-
- weight_value_tuples = []
- for weight in model.weights:
- weight_name = format_weight_name(weight.name, _prefix=_prefix)
- if weight_name in state_dict:
- weight_value = state_dict[weight_name]
-            # Check if the shape of the current weight and the one from the checkpoint are different
-            if K.int_shape(weight) != weight_value.shape:
-                # If yes we reshape the weight from the checkpoint to match the current weight
- # If the two shapes are not compatible we raise an issue
- try:
- weight_value = tf.reshape(weight_value, K.int_shape(weight))
- except ValueError as e:
- if ignore_mismatched_sizes:
- mismatched_layers.append((weight_name, weight_value.shape, K.int_shape(weight)))
- continue
- else:
- raise e
-
- weight_value_tuples.append((weight, weight_value))
-
- # Load all the weights
- K.batch_set_value(weight_value_tuples)
-
- return missing_layers, unexpected_layers, mismatched_layers
-
-
-def init_copy_embeddings(old_embeddings, new_num_tokens):
- r"""
- This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
- new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
- kept or not. Example:
-
- - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]
-
- - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
- - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]
-
- - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
- """
- old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
- size_diff = new_num_tokens - old_num_tokens
-
- # initialize new embeddings
- # Copy token embeddings from the previous ones
- if tf.math.greater(size_diff, 0):
-        # If the new size is greater than the old one, we pad the current embeddings up to the new size and build a
-        # mask identifying the padded positions, so that they can later be replaced by newly initialized embedding
-        # values
- current_weights = tf.pad(
- old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
- )
- num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
- mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
- mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
- else:
-        # If the new size is lower than the old one, we take the current embeddings up to the new size
- current_weights = tf.slice(
- old_embeddings.value(),
- tf.convert_to_tensor([0, 0]),
- tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
- )
- mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
-
- return mask, current_weights
-
-
-class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
- r"""
- Base class for all TF models.
-
- [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
- downloading and saving models as well as a few methods common to all models to:
-
- - resize the input embeddings,
- - prune heads in the self-attention heads.
-
- Class attributes (overridden by derived classes):
-
- - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
- for this model architecture.
- - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
- classes of the same architecture adding modules on top of the base model.
- - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
- models, `pixel_values` for vision models and `input_values` for speech models).
- """
- config_class = None
- base_model_prefix = ""
- main_input_name = "input_ids"
- _auto_class = None
- _using_dummy_loss = None
- _label_to_output_map = None
-
-    # a list of re patterns of tensor names to ignore from the model when loading the model weights
-    # (and avoid unnecessary warnings).
-    _keys_to_ignore_on_load_missing = None
-    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
- # (and avoid unnecessary warnings).
- _keys_to_ignore_on_load_unexpected = None
- _requires_load_weight_prefix = False
-
- @property
- def dummy_inputs(self) -> Dict[str, tf.Tensor]:
- """
- Dummy inputs to build the network.
-
- Returns:
- `Dict[str, tf.Tensor]`: The dummy inputs.
- """
- return {
- "input_ids": tf.constant(DUMMY_INPUTS, dtype=tf.int32),
- }
-
- @property
- def framework(self) -> str:
- """
- :str: Identifies that this is a TensorFlow model.
- """
- return "tf"
-
- def __init__(self, config, *inputs, **kwargs):
- super().__init__(*inputs, **kwargs)
- if not isinstance(config, PretrainedConfig):
- raise ValueError(
- f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
- "`PretrainedConfig`. To create a model from a pretrained model use "
- f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
- )
- # Save config and origin of the pretrained weights if given in model
- self.config = config
- self.name_or_path = config.name_or_path
- self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
- # Set the serving spec quickly to ensure that Keras doesn't use the specific dummy input shapes as the spec
- self._set_save_spec(self.serving.input_signature[0])
-
- def get_config(self):
- return self.config.to_dict()
-
- @classmethod
- def from_config(cls, config, **kwargs):
- if isinstance(config, PretrainedConfig):
- return cls._from_config(config, **kwargs)
- return cls._from_config(cls.config_class.from_dict(config, **kwargs))
-
- @classmethod
- def _from_config(cls, config, **kwargs):
- """
- All context managers that the model should be initialized under go here.
- """
- return cls(config, **kwargs)
-
- def get_head_mask(self, head_mask: Optional[tf.Tensor], num_hidden_layers: int) -> tf.Tensor:
- """
- Prepare the head mask if needed.
-
- Args:
- head_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
- The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
- num_hidden_layers (`int`):
- The number of hidden layers in the model.
-
- Returns:
- `tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
- `[None]` for each layer.
- """
- if head_mask is not None:
- head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
- else:
- head_mask = [None] * num_hidden_layers
-
- return head_mask
-
- def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
- """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
- if head_mask.shape.rank == 1:
- head_mask = head_mask[None, None, :, None, None]
- head_mask = tf.repeat(head_mask, repeats=num_hidden_layers, axis=0)
- elif head_mask.shape.rank == 2:
- head_mask = head_mask[:, None, :, None, None]
-        assert head_mask.shape.rank == 5, f"head_mask.shape.rank must be 5, got {head_mask.shape.rank}"
- head_mask = tf.cast(head_mask, tf.float32) # switch to float if need + fp16 compatibility
- return head_mask
-
- def eager_serving(self, inputs):
- """
- Method used for serving the model. Intended not to be compiled with a tf.function decorator so that we can use
- it to generate multiple signatures later.
-
- Args:
- inputs (`Dict[str, tf.Tensor]`):
- The input of the saved model as a dictionary of tensors.
- """
- output = self.call(inputs)
-
- return self.serving_output(output)
-
- @tf.function(
- input_signature=[
- {
- "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
- "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
- "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
- }
- ]
- )
- def serving(self, inputs):
- """
- Method used for serving the model.
-
- Args:
- inputs (`Dict[str, tf.Tensor]`):
- The input of the saved model as a dictionary of tensors.
- """
- output = self.call(inputs)
-
- return self.serving_output(output)
-
- def serving_output(self, output):
- """
- Prepare the output of the saved model. Each model must implement this function.
-
- Args:
- output ([`TFBaseModelOutput`]):
- The output returned by the model.
- """
- raise NotImplementedError
-
- def can_generate(self) -> bool:
- """
- Returns whether this model can generate sequences with `.generate()`.
-
- Returns:
- `bool`: Whether this model can generate sequences with `.generate()`.
- """
- # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation
- if "GenerationMixin" in str(self.prepare_inputs_for_generation):
- return False
- return True
-
- def get_input_embeddings(self) -> tf.keras.layers.Layer:
- """
- Returns the model's input embeddings layer.
-
- Returns:
-            `tf.keras.layers.Layer`: The embeddings layer mapping vocabulary to hidden states.
- """
- main_layer = getattr(self, self.base_model_prefix, self)
-
- if main_layer is not self:
- return main_layer.get_input_embeddings()
- else:
- raise NotImplementedError
-
- def _save_checkpoint(self, checkpoint_dir, epoch):
- if not os.path.isdir(checkpoint_dir):
- os.mkdir(checkpoint_dir)
- # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
- # state for us, because it requires special handling for objects like custom losses, which we use
- # internally and which users are likely to use too
- weights_path = os.path.join(checkpoint_dir, "weights.h5")
- self.save_weights(weights_path)
- extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
- extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
- with open(extra_data_path, "wb") as f:
- pickle.dump(extra_data, f)
-
- def load_repo_checkpoint(self, repo_path_or_name):
- """
- Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
- the checkpoint was made.
-
- Args:
- repo_path_or_name (`str`):
- Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case
- the repository will have the name of that local folder).
-
- Returns:
- `dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
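-
-        Example (an illustrative sketch; the repo name and `tf_train_dataset` are hypothetical):
-
-        ```py
-        >>> extra_data = model.load_repo_checkpoint("my-username/my-finetuned-model")
-        >>> model.fit(tf_train_dataset, epochs=10, initial_epoch=extra_data["epoch"])
-        ```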
- """
- if getattr(self, "optimizer", None) is None:
- raise RuntimeError(
- "Checkpoint loading failed as no optimizer is attached to the model. "
- "This is most likely caused by the model not being compiled."
- )
- if not os.path.isdir(repo_path_or_name):
- # If this isn't a local path, check that the remote repo exists and has a checkpoint in it
- repo_files = list_repo_files(repo_path_or_name)
- for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"):
- if file not in repo_files:
- raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!")
- if "/" not in repo_path_or_name:
- model_id = repo_path_or_name
- repo_path_or_name = self.get_full_repo_name(repo_path_or_name)
- else:
- model_id = repo_path_or_name.split("/")[-1]
- repo = Repository(model_id, clone_from=f"https://huggingface.co/{repo_path_or_name}")
- local_dir = repo.local_dir
- else:
- local_dir = repo_path_or_name
-
- # Now make sure the repo actually has a checkpoint in it.
- checkpoint_dir = os.path.join(local_dir, "checkpoint")
- weights_file = os.path.join(checkpoint_dir, "weights.h5")
- if not os.path.isfile(weights_file):
- raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!")
- extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle")
- if not os.path.isfile(extra_data_file):
- raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!")
-
- # Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model.
- # The optimizer state includes the iteration count, so learning rate schedules should resume as normal too.
- self.load_weights(weights_file)
- with open(extra_data_file, "rb") as f:
- extra_data = pickle.load(f)
- self.optimizer.set_weights(extra_data["optimizer_state"])
-
- # Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't
- # set it directly, but the user can pass it to fit().
- return {"epoch": extra_data["epoch"]}
-
- def prepare_tf_dataset(
- self,
- dataset: "datasets.Dataset", # noqa:F821
- batch_size: int = 8,
- shuffle: bool = True,
- tokenizer: Optional["PreTrainedTokenizerBase"] = None,
- collate_fn: Optional[Callable] = None,
- collate_fn_args: Optional[Dict[str, Any]] = None,
- drop_remainder: Optional[bool] = None,
- prefetch: bool = True,
- ):
- """
- Wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching. This method is
- designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like `fit()` without
- further modification. The method will drop columns from the dataset if they don't match input names for the
- model. If you want to specify the column names to return rather than using the names that match this model, we
- recommend using `Dataset.to_tf_dataset()` instead.
-
- Args:
- dataset (`Any`):
- A [~`datasets.Dataset`] to be wrapped as a `tf.data.Dataset`.
- batch_size (`int`, defaults to 8):
- The size of batches to return.
- shuffle (`bool`, defaults to `True`):
- Whether to return samples from the dataset in random order. Usually `True` for training datasets and
- `False` for validation/test datasets.
- tokenizer ([`PreTrainedTokenizerBase`], *optional*):
- A `PreTrainedTokenizer` that will be used to pad samples to create batches. Has no effect if a specific
- `collate_fn` is passed instead.
- collate_fn (`Callable`, *optional*):
- A function that collates samples from the dataset into a single batch. Defaults to
- `DefaultDataCollator` if no `tokenizer` is supplied or `DataCollatorWithPadding` if a `tokenizer` is
- passed.
- collate_fn_args (`Dict[str, Any]`, *optional*):
- A dict of arguments to pass to the `collate_fn` alongside the list of samples.
- drop_remainder (`bool`, *optional*):
- Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults
- to the same setting as `shuffle`.
- prefetch (`bool`, defaults to `True`):
- Whether to add prefetching to the end of the `tf.data` pipeline. This is almost always beneficial for
- performance, but can be disabled in edge cases.
-
-
- Returns:
- `Dataset`: A `tf.data.Dataset` which is ready to pass to the Keras API.
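-
-        Example (an illustrative sketch; `tokenized_dataset` and `tokenizer` are assumed to already exist):
-
-        ```py
-        >>> tf_train = model.prepare_tf_dataset(
-        ...     tokenized_dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer
-        ... )
-        >>> tf_eval = model.prepare_tf_dataset(
-        ...     tokenized_dataset["validation"], batch_size=16, shuffle=False, tokenizer=tokenizer
-        ... )
-        >>> model.fit(tf_train, validation_data=tf_eval, epochs=3)
-        ```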
- """
- requires_backends(self, ["datasets"])
- import datasets
-
- if collate_fn is None:
- if tokenizer is None:
- collate_fn = DefaultDataCollator(return_tensors="np")
- else:
- collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="np")
- if collate_fn_args is None:
- collate_fn_args = {}
-
- if not isinstance(dataset, datasets.Dataset):
- raise TypeError("Dataset argument should be a datasets.Dataset!")
- model_inputs = list(dict(inspect.signature(self.call).parameters).keys())
- model_labels = find_labels(self.__class__)
- if "cols_to_retain" in list(inspect.signature(dataset._get_output_signature).parameters.keys()):
- output_signature, _ = dataset._get_output_signature(
- dataset,
- batch_size=None,
- collate_fn=collate_fn,
- collate_fn_args=collate_fn_args,
- cols_to_retain=model_inputs,
- )
- else:
- # TODO Matt: This is a workaround for older versions of datasets that are missing the `cols_to_retain`
- # argument. We should remove this once the minimum supported version of datasets is > 2.3.2
- unwanted_columns = [
- feature
- for feature in dataset.features
- if feature not in model_inputs and feature not in ("label_ids", "label")
- ]
- dataset = dataset.remove_columns(unwanted_columns)
- output_signature, _ = dataset._get_output_signature(
- dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args
- )
- output_columns = list(output_signature.keys())
- feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels]
- label_cols = [col for col in output_columns if col in model_labels]
-
- if drop_remainder is None:
- drop_remainder = shuffle
- tf_dataset = dataset.to_tf_dataset(
- columns=feature_cols,
- label_cols=label_cols,
- batch_size=batch_size,
- shuffle=shuffle,
- drop_remainder=drop_remainder,
- collate_fn=collate_fn,
- collate_fn_args=collate_fn_args,
- prefetch=prefetch,
- )
- return tf_dataset
-
- def compile(
- self,
- optimizer="rmsprop",
- loss="passthrough",
- metrics=None,
- loss_weights=None,
- weighted_metrics=None,
- run_eagerly=None,
- steps_per_execution=None,
- **kwargs,
- ):
- """
- This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss
- function themselves.
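-
-        Example (an illustrative sketch; omitting `loss` makes the model train on its own internal loss output):
-
-        ```py
-        >>> model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5))
-        ```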
- """
- if loss == "passthrough":
- logger.warning(
- "No loss specified in compile() - the model's internal loss computation will be used as the "
- "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! "
- "To disable this behaviour please pass a loss argument, or explicitly pass "
- "`loss=None` if you do not want your model to compute a loss."
- )
- loss = dummy_loss
- self._using_dummy_loss = True
- else:
- self._using_dummy_loss = False
- parent_args = list(inspect.signature(tf.keras.Model.compile).parameters.keys())
- # This argument got renamed, we need to support both versions
- if "steps_per_execution" in parent_args:
- super().compile(
- optimizer=optimizer,
- loss=loss,
- metrics=metrics,
- loss_weights=loss_weights,
- weighted_metrics=weighted_metrics,
- run_eagerly=run_eagerly,
- steps_per_execution=steps_per_execution,
- **kwargs,
- )
- else:
- super().compile(
- optimizer=optimizer,
- loss=loss,
- metrics=metrics,
- loss_weights=loss_weights,
- weighted_metrics=weighted_metrics,
- run_eagerly=run_eagerly,
- experimental_steps_per_execution=steps_per_execution,
- **kwargs,
- )
-
- def compute_loss(self, *args, **kwargs):
- if hasattr(tf.keras.Model, "compute_loss"):
- # This will be true in TF 2.8 or greater
- return super().compute_loss(*args, **kwargs)
- else:
- warnings.warn(
- "The old compute_loss method is deprecated as it conflicts with the Keras compute_loss "
- "method added in TF 2.8. If you want the original HF compute_loss, please call "
- "hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, "
- "calling compute_loss() will get the Keras method instead.",
- FutureWarning,
- )
- return self.hf_compute_loss(*args, **kwargs)
-
- def get_label_to_output_name_mapping(self):
- arg_names = list(dict(inspect.signature(self.call).parameters).keys())
- if self._label_to_output_map is not None:
- return self._label_to_output_map
- elif "start_positions" in arg_names:
- return {"start_positions": "start_logits", "end_positions": "end_logits"}
- elif "sentence_order_label" in arg_names:
- return {"labels": "prediction_logits", "sentence_order_label": "sop_logits"}
- elif "next_sentence_label" in arg_names:
- return {"labels": "prediction_logits", "next_sentence_label": "seq_relationship_logits"}
- elif "mc_labels" in arg_names:
- return {"labels": "logits", "mc_labels": "mc_logits"}
- else:
- return {}
-
- def train_step(self, data):
- """
- A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models
- and supports directly training on the loss output head. In addition, it ensures input keys are copied to the
- labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure
- that they are available to the model during the forward pass.
- """
-
- # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map`
- arg_names = list(dict(inspect.signature(self.call).parameters).keys())
- label_kwargs = find_labels(self.__class__)
- label_to_output = self.get_label_to_output_name_mapping()
- output_to_label = {val: key for key, val in label_to_output.items()}
- if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"):
- # Newer TF train steps leave this out
- data = data_adapter.expand_1d(data)
- x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
- # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify
- # them during input/label pre-processing. This avoids surprising the user by wrecking their data.
- # In addition, modifying mutable Python inputs makes XLA compilation impossible.
- if isinstance(x, dict):
- x = x.copy()
- if isinstance(y, dict):
- y = y.copy()
-
- # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments,
- # if those keys are not already present in the input dict
- if self._using_dummy_loss and y is not None:
- # If y is a tensor and the model only has one label-like input, map y to that input
- if len(label_kwargs) == 1 and isinstance(y, tf.Tensor):
- if isinstance(x, tf.Tensor):
- x = {arg_names[0]: x}
- label_kwarg = next(iter(label_kwargs))
- if label_kwarg not in x:
- x[label_kwarg] = y
- # Otherwise, copy keys from y to x as long as they weren't already present in x
- elif isinstance(y, dict):
- if isinstance(x, tf.Tensor):
- x = {arg_names[0]: x}
- for key, val in y.items():
- if key in arg_names and key not in x:
- x[key] = val
- elif output_to_label.get(key, None) in arg_names and key not in x:
- x[output_to_label[key]] = val
- if y is None:
- y = {key: val for key, val in x.items() if key in label_kwargs}
- if not y and not self._using_dummy_loss:
- raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!")
-
- if isinstance(y, dict):
- # Rename labels at this point to match output heads
- y = {label_to_output.get(key, key): val for key, val in y.items()}
-
- # Run forward pass.
- with tf.GradientTape() as tape:
- if self._using_dummy_loss and "return_loss" in arg_names:
- y_pred = self(x, training=True, return_loss=True)
- else:
- y_pred = self(x, training=True)
- if self._using_dummy_loss:
- loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses)
- else:
- loss = None
-
- # This next block matches outputs to label keys. Tensorflow's standard method for doing this
- # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors)
- if isinstance(y, dict) and len(y) == 1:
- if list(y.keys())[0] in y_pred.keys():
- y_pred = y_pred[list(y.keys())[0]]
- elif list(y_pred.keys())[0] == "loss":
- y_pred = y_pred[1]
- else:
- y_pred = y_pred[0]
- _, y = y.popitem()
- elif isinstance(y, dict):
- # If the labels are a dict, match keys from the output by name
- y_pred = {key: val for key, val in y_pred.items() if key in y}
- elif isinstance(y, tuple) or isinstance(y, list):
- # If the labels are a tuple/list, match keys to the output by order, skipping the loss.
- if list(y_pred.keys())[0] == "loss":
- y_pred = y_pred.to_tuple()[1:]
- else:
- y_pred = y_pred.to_tuple()
- y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems
- else:
- # If the labels are a single tensor, match them to the first non-loss tensor in the output
- if list(y_pred.keys())[0] == "loss":
- y_pred = y_pred[1]
- else:
- y_pred = y_pred[0]
-
- if loss is None:
- loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
-
- # Run backwards pass.
- self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
-
- self.compiled_metrics.update_state(y, y_pred, sample_weight)
- # Collect metrics to return
- return_metrics = {}
- for metric in self.metrics:
- result = metric.result()
- if isinstance(result, dict):
- return_metrics.update(result)
- else:
- return_metrics[metric.name] = result
- return return_metrics
-
- def test_step(self, data):
- """
-        A modification of Keras's default `test_step` that correctly handles matching outputs to labels for our models
-        and supports directly evaluating on the loss output head. In addition, it ensures input keys are copied to the
- labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure
- that they are available to the model during the forward pass.
- """
- # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map`
- arg_names = list(dict(inspect.signature(self.call).parameters).keys())
- label_kwargs = find_labels(self.__class__)
- label_to_output = self.get_label_to_output_name_mapping()
- output_to_label = {val: key for key, val in label_to_output.items()}
- if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"):
- # Newer versions leave this out
- data = data_adapter.expand_1d(data)
- x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
- # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify
- # them during input/label pre-processing. This avoids surprising the user by wrecking their data.
- # In addition, modifying mutable Python inputs makes XLA compilation impossible.
- if isinstance(x, dict):
- x = x.copy()
- if isinstance(y, dict):
- y = y.copy()
-
- # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments,
- # if those keys are not already present in the input dict
- if self._using_dummy_loss and y is not None:
- # If y is a tensor and the model only has one label-like input, map y to that input
- if len(label_kwargs) == 1 and isinstance(y, tf.Tensor):
- if isinstance(x, tf.Tensor):
- x = {arg_names[0]: x}
- label_kwarg = next(iter(label_kwargs))
- if label_kwarg not in x:
- x[label_kwarg] = y
- # Otherwise, copy keys from y to x as long as they weren't already present in x
- elif isinstance(y, dict):
- if isinstance(x, tf.Tensor):
- x = {arg_names[0]: x}
- for key, val in y.items():
- if key in arg_names and key not in x:
- x[key] = val
- elif output_to_label.get(key, None) in arg_names and key not in x:
- x[output_to_label[key]] = val
- if y is None:
- y = {key: val for key, val in x.items() if key in label_kwargs}
- if not y and not self._using_dummy_loss:
- raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!")
-
- if isinstance(y, dict):
- # Rename labels at this point to match output heads
- y = {label_to_output.get(key, key): val for key, val in y.items()}
-
- # Run forward pass.
- if self._using_dummy_loss and "return_loss" in arg_names:
- y_pred = self(x, return_loss=True, training=False)
- else:
- y_pred = self(x, training=False)
- if self._using_dummy_loss:
- loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses)
- else:
- loss = None
-
- # This next block matches outputs to label keys. Tensorflow's standard method for doing this
- # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors)
- if isinstance(y, dict) and len(y) == 1:
- if list(y.keys())[0] in y_pred.keys():
- y_pred = y_pred[list(y.keys())[0]]
- elif list(y_pred.keys())[0] == "loss":
- y_pred = y_pred[1]
- else:
- y_pred = y_pred[0]
- _, y = y.popitem()
- elif isinstance(y, dict):
- # If the labels are a dict, match keys from the output by name
- y_pred = {key: val for key, val in y_pred.items() if key in y}
- elif isinstance(y, tuple) or isinstance(y, list):
- # If the labels are a tuple/list, match keys to the output by order, skipping the loss.
- if list(y_pred.keys())[0] == "loss":
- y_pred = y_pred.to_tuple()[1:]
- else:
- y_pred = y_pred.to_tuple()
- y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems
- else:
- # If the labels are a single tensor, match them to the first non-loss tensor in the output
- if list(y_pred.keys())[0] == "loss":
- y_pred = y_pred[1]
- else:
- y_pred = y_pred[0]
-
- if loss is None:
- loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
-
- self.compiled_metrics.update_state(y, y_pred, sample_weight)
- # Collect metrics to return
- return_metrics = {}
- for metric in self.metrics:
- result = metric.result()
- if isinstance(result, dict):
- return_metrics.update(result)
- else:
- return_metrics[metric.name] = result
- return return_metrics
-
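- # A minimal usage sketch of the custom train/test steps above, assuming a classification checkpoint and
- # pre-built tf.data datasets (placeholder names): calling `compile()` without a loss makes `fit()` and
- # `evaluate()` fall back to the model's internal loss head (the dummy-loss path referenced above).
- #
- #     import tensorflow as tf
- #     from transformers import TFAutoModelForSequenceClassification
- #
- #     model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
- #     model.compile(optimizer=tf.keras.optimizers.Adam(3e-5))  # no loss -> internal loss head is used
- #     model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=1)
-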
- def create_model_card(
- self,
- output_dir,
- model_name: str,
- language: Optional[str] = None,
- license: Optional[str] = None,
- tags: Optional[Union[str, List[str]]] = None,
- finetuned_from: Optional[str] = None,
- tasks: Optional[Union[str, List[str]]] = None,
- dataset_tags: Optional[Union[str, List[str]]] = None,
- dataset: Optional[Union[str, List[str]]] = None,
- dataset_args: Optional[Union[str, List[str]]] = None,
- ):
- """
- Creates a draft of a model card using the information available to the `Trainer`.
-
- Args:
- output_dir (`str` or `os.PathLike`):
- The folder in which to create the model card.
- model_name (`str`):
- The name of the model.
- language (`str`, *optional*):
- The language of the model (if applicable)
- license (`str`, *optional*):
- The license of the model. Will default to the license of the pretrained model used, if the original
- model given to the `Trainer` comes from a repo on the Hub.
- tags (`str` or `List[str]`, *optional*):
- Some tags to be included in the metadata of the model card.
- finetuned_from (`str`, *optional*):
- The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
- of the original model given to the `Trainer` (if it comes from the Hub).
- tasks (`str` or `List[str]`, *optional*):
- One or several task identifiers, to be included in the metadata of the model card.
- dataset_tags (`str` or `List[str]`, *optional*):
- One or several dataset tags, to be included in the metadata of the model card.
- dataset (`str` or `List[str]`, *optional*):
- One or several dataset identifiers, to be included in the metadata of the model card.
- dataset_args (`str` or `List[str]`, *optional*):
- One or several dataset arguments, to be included in the metadata of the model card.
- """
- # Avoids a circular import by doing this when necessary.
- from .modelcard import TrainingSummary # tests_ignore
-
- training_summary = TrainingSummary.from_keras(
- self,
- keras_history=self.history,
- language=language,
- license=license,
- tags=tags,
- model_name=model_name,
- finetuned_from=finetuned_from,
- tasks=tasks,
- dataset_tags=dataset_tags,
- dataset=dataset,
- dataset_args=dataset_args,
- )
- model_card = training_summary.to_model_card()
- with open(os.path.join(output_dir, "README.md"), "w") as f:
- f.write(model_card)
-
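- # A minimal usage sketch, assuming a fitted Keras model and placeholder directory/metadata values:
- #
- #     model.fit(tf_train_dataset, epochs=1)
- #     model.create_model_card(
- #         output_dir="./my_model",
- #         model_name="my-finetuned-model",
- #         language="en",
- #         tasks="text-classification",
- #     )
-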
- def set_input_embeddings(self, value):
- """
- Set model's input embeddings
-
- Args:
- value (`tf.Variable`):
- The new weights mapping vocabulary to hidden states.
- """
- main_layer = getattr(self, self.base_model_prefix)
-
- if main_layer is None:
- raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
-
- try:
- main_layer.set_input_embeddings(value)
- except AttributeError:
- logger.info("Building the model")
- self(self.dummy_inputs)
- main_layer.set_input_embeddings(value)
-
- def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
- """
- Returns the model's output embeddings
-
- Returns:
- `tf.Variable`: The weights mapping hidden states to vocabulary.
- """
- if self.get_lm_head() is not None:
- lm_head = self.get_lm_head()
-
- try:
- return lm_head.get_output_embeddings()
- except AttributeError:
- logger.info("Building the model")
- self(self.dummy_inputs)
-
- return lm_head.get_output_embeddings()
-
- return None # Overwrite for models with output embeddings
-
- def set_output_embeddings(self, value):
- """
- Set model's output embeddings
-
- Args:
- value (`tf.Variable`):
- The new weights mapping hidden states to vocabulary.
- """
- if self.get_lm_head() is not None:
- lm_head = self.get_lm_head()
- try:
- lm_head.set_output_embeddings(value)
- except AttributeError:
- logger.info("Building the model")
- self(self.dummy_inputs)
- lm_head.set_output_embeddings(value)
-
- def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
- """
- Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
- embeddings
-
- Return:
- `tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
- """
- warnings.warn(
- "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
- )
- return self.get_lm_head()
-
- def get_prefix_bias_name(self) -> Union[None, str]:
- """
- Get the concatenated _prefix name of the bias from the model name to the parent layer
-
- Return:
- `str`: The _prefix name of the bias.
- """
- warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
- return None
-
- def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
- """
- Dict of bias attached to an LM head. The key represents the name of the bias attribute.
-
- Return:
- `Dict[str, tf.Variable]`: The weights representing the bias, None if not an LM model.
- """
- if self.get_lm_head() is not None:
- lm_head = self.get_lm_head()
- try:
- return lm_head.get_bias()
- except AttributeError:
- self(self.dummy_inputs)
-
- return lm_head.get_bias()
- return None
-
- def set_bias(self, value):
- """
- Set all the bias in the LM head.
-
- Args:
- value (`Dict[tf.Variable]`):
- All the new bias attached to an LM head.
- """
- if self.get_lm_head() is not None:
- lm_head = self.get_lm_head()
- try:
- lm_head.set_bias(value)
- except AttributeError:
- self(self.dummy_inputs)
- lm_head.set_bias(value)
-
- def get_lm_head(self) -> tf.keras.layers.Layer:
- """
- The LM Head layer. This method must be overwritten by all the models that have an LM head.
-
- Return:
- `tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
- """
- return None
-
- def resize_token_embeddings(
- self, new_num_tokens: Optional[int] = None
- ) -> Union[tf.keras.layers.Embedding, tf.Variable]:
- """
- Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
-
- Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
-
- Arguments:
- new_num_tokens (`int`, *optional*):
- The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
- vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
- returns a pointer to the input tokens without doing anything.
-
- Return:
- `tf.Variable` or `tf.keras.layers.Embedding`: Pointer to the input tokens of the model.
- """
- # TODO (joao): flagged for replacement (by `_v2_resized_token_embeddings`) due to embeddings refactor
-
- # Run the new code path if the model has a keras embeddings layer
- if isinstance(self.get_input_embeddings(), tf.keras.layers.Embedding):
- return self._v2_resized_token_embeddings(new_num_tokens)
-
- if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
- return self._get_word_embedding_weight(self.get_input_embeddings())
-
- model_embeds = self._resize_token_embeddings(new_num_tokens)
-
- # Update base model and current model config
- self.config.vocab_size = new_num_tokens
-
- return model_embeds
-
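- # A minimal usage sketch, assuming a tokenizer to which new tokens have been added (placeholder names):
- #
- #     tokenizer.add_tokens(["<new_token_1>", "<new_token_2>"])
- #     model.resize_token_embeddings(len(tokenizer))  # also updates model.config.vocab_size
-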
- def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> tf.keras.layers.Embedding:
- """
- Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
-
- Arguments:
- new_num_tokens (`int`, *optional*):
- The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
- vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
- returns a pointer to the input tokens without doing anything.
-
- Return:
- `tf.keras.layers.Embedding`: Pointer to the input tokens of the model.
- """
- if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
- return self.get_input_embeddings()
-
- model_embeds = self._v2_resize_token_embeddings(new_num_tokens)
-
- # Update base model and current model config
- self.config.vocab_size = new_num_tokens
-
- return model_embeds
-
- def _get_word_embedding_weight(model, embedding_layer):
- # TODO (joao): flagged for deletion due to embeddings refactor
-
- # If the variable holds the weights themselves, return them
- if isinstance(embedding_layer, tf.Tensor):
- return embedding_layer
- # Otherwise, try to get them from the layer's attributes
-
- embeds = getattr(embedding_layer, "weight", None)
- if embeds is not None:
- return embeds
-
- embeds = getattr(embedding_layer, "decoder", None)
- if embeds is not None:
- return embeds
-
- # The reason why the attributes don't exist might be
- # because the model is not built, so retry getting
- # the argument after building the model
- model(model.dummy_inputs)
-
- embeds = getattr(embedding_layer, "weight", None)
- if embeds is not None:
- return embeds
-
- embeds = getattr(embedding_layer, "decoder", None)
- if embeds is not None:
- return embeds
-
- return None
-
- def _resize_token_embeddings(self, new_num_tokens):
- # TODO (joao): flagged for replacement (by `_v2_resize_token_embeddings`) due to embeddings refactor
- old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
- new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
-
- # if word embeddings are not tied, make sure that lm head bias is resized as well
- if self.get_bias() is not None:
- old_lm_head_bias = self.get_bias()
- new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
-
- self.set_bias(new_lm_head_bias)
-
- # if word embeddings are not tied, make sure that lm head decoder is resized as well
- if self.get_output_embeddings() is not None:
- old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
- new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
-
- self.set_output_embeddings(new_lm_head_decoder)
-
- self.set_input_embeddings(new_embeddings)
-
- return self.get_input_embeddings()
-
- def _v2_resize_token_embeddings(self, new_num_tokens):
- old_embeddings = self.get_input_embeddings()
- new_embeddings = self._v2_get_resized_embeddings(old_embeddings, new_num_tokens)
- self.set_input_embeddings(new_embeddings)
-
- # If word embeddings are not tied, make sure that lm head bias is resized as well
- if self.get_bias() is not None:
- old_lm_head_bias = self.get_bias()
- new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
- self.set_bias(new_lm_head_bias)
-
- # If word embeddings are not tied, make sure that lm head decoder is resized as well.
- tied_weights = self.get_input_embeddings() == self.get_output_embeddings()
- if self.get_output_embeddings() is not None and not tied_weights:
- old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
- # TODO (joao): this one probably needs a v2 version with other models
- new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
- self.set_output_embeddings(new_lm_head_decoder)
-
- return self.get_input_embeddings()
-
- def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
- """
- Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
- Reducing the size will remove vectors from the end
-
- Args:
- old_lm_head_bias (`tf.Variable`):
- Old lm head bias to be resized.
- new_num_tokens (`int`, *optional*):
- New number of tokens in the linear matrix.
-
- Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
- vectors from the end. If not provided or `None`, just returns `None`.
-
- Return:
- `tf.Variable`: Pointer to the resized bias.
- """
- # TODO (joao): flagged for replacement (by `_v2_get_resized_lm_head_bias`) due to embeddings refactor
- new_lm_head_bias = {}
-
- for attr, weight in old_lm_head_bias.items():
- first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
- size_diff = new_num_tokens - old_num_tokens
- final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
-
- # initialize new bias
- if tf.math.greater(size_diff, 0):
- padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
- current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
- num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
- mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
- bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
- bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
- else:
- slice_from = [0] if first_dim is None else [0, 0]
- current_bias = tf.slice(
- weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
- )
- bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
-
- new_bias = self.add_weight(
- shape=final_shape,
- initializer="zeros",
- trainable=True,
- name=weight.name.split(":")[0],
- )
- init_bias = tf.where(bias_mask, current_bias, new_bias.value())
-
- new_bias.assign(init_bias)
- new_lm_head_bias[attr] = new_bias
-
- return new_lm_head_bias
-
- def _v2_get_resized_lm_head_bias(
- self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int
- ) -> Dict[str, tf.Tensor]:
- """
- Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
- Reducing the size will remove vectors from the end
-
- Args:
- old_lm_head_bias (`Dict[str, tf.Variable]`):
- Old lm head bias to be resized.
- new_num_tokens (`int`):
- New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at
- the end. Reducing the size will remove vectors from the end.
-
- Return:
- `tf.Tensor`: Values for the resized bias.
- """
- new_lm_head_bias = {}
-
- for attr, weight in old_lm_head_bias.items():
- # Determine the size difference (depending on the shape)
- first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
- size_diff = new_num_tokens - old_num_tokens
-
- # Copy the old bias values to the new bias
- if old_num_tokens > new_num_tokens:
- new_bias = weight.value()[..., :new_num_tokens]
- else:
- padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
- new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape))
-
- new_lm_head_bias[attr] = new_bias
- return new_lm_head_bias
-
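- # A minimal illustration of the pad/truncate logic above for a 1-D bias (placeholder values):
- #
- #     bias = tf.constant([0.1, 0.2, 0.3])   # old_num_tokens = 3
- #     tf.pad(bias, [[0, 2]])                # grow to 5  -> [0.1, 0.2, 0.3, 0.0, 0.0]
- #     bias[..., :2]                         # shrink to 2 -> [0.1, 0.2]
-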
- def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
- """
- Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
- Reducing the size will remove vectors from the end
-
- Args:
- old_lm_head_decoder (`tf.Variable`):
- Old lm head decoder to be resized.
- new_num_tokens (`int`, *optional*):
- New number of tokens in the linear matrix.
-
- Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
- vectors from the end. If not provided or `None`, just returns `None`.
-
- Return:
- `tf.Variable`: Pointer to the resized decoder, or the unchanged decoder when the output embeddings are tied
- to the input ones (in which case there is no separate decoder to resize).
- """
- new_lm_head_decoder = old_lm_head_decoder
- is_input_output_equals = tf.reduce_any(
- self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
- )
-
- if old_lm_head_decoder is not None and not is_input_output_equals:
- old_embedding_dim = shape_list(old_lm_head_decoder)[1]
- decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
- new_lm_head_decoder = self.add_weight(
- shape=(new_num_tokens, old_embedding_dim),
- initializer="zeros",
- trainable=True,
- name=old_lm_head_decoder.name.split(":")[0],
- )
- init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
-
- new_lm_head_decoder.assign(init_decoder)
-
- return new_lm_head_decoder
-
- def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
- """
- Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
- initialized vectors at the end. Reducing the size will remove vectors from the end
-
- Args:
- old_embeddings (`tf.Variable`):
- Old embeddings to be resized.
- new_num_tokens (`int`, *optional*):
- New number of tokens in the embedding matrix.
-
- Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
- vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
- `tf.Variable` module of the model without doing anything.
-
- Return:
- `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is
- `None`
- """
- # TODO (joao): flagged for replacement (by `_v2_get_resized_embeddings`) due to embeddings refactor
- old_embedding_dim = shape_list(old_embeddings)[1]
- init_range = getattr(self.config, "initializer_range", 0.02)
- embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
- new_embeddings = self.add_weight(
- name=old_embeddings.name.split(":")[0],
- shape=[new_num_tokens, old_embedding_dim],
- initializer=get_initializer(init_range),
- dtype=tf.float32,
- )
- init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
-
- new_embeddings.assign(init_embeddings)
-
- return new_embeddings
-
- def _v2_get_resized_embeddings(
- self, old_embeddings: tf.keras.layers.Embedding, new_num_tokens: int
- ) -> tf.keras.layers.Embedding:
- """
- Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized
- vectors at the end. Reducing the size will remove vectors from the end.
-
- Args:
- old_embeddings (`tf.keras.layers.Embedding`):
- Old embeddings to be resized.
- new_num_tokens (`int`, *optional*):
- New number of tokens in the embedding matrix.
-
- Return:
- `tf.keras.layers.Embedding`: Resized Embedding layer.
- """
-
- # Get the initialization range for the embeddings
- init_range = 0.02 # default value
- potential_initialization_variable_names = [
- "initializer_range", # most common
- "initializer_factor", # e.g. T5
- "init_std", # e.g BART
- ]
- for var_name in potential_initialization_variable_names:
- if hasattr(self.config, var_name):
- init_range = getattr(self.config, var_name)
-
- # Get a new (initialized) embeddings layer
- new_embeddings = tf.keras.layers.Embedding(
- input_dim=new_num_tokens,
- output_dim=old_embeddings.output_dim,
- embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=init_range),
- name=old_embeddings.embeddings.name[:-13], # exact same scoped name except "/embeddings:0"
- )
- new_embeddings(tf.constant([[0]]))
-
- # Copy the old embeddings to the new embeddings
- if old_embeddings.input_dim >= new_num_tokens:
- init_embeddings = old_embeddings.embeddings[:new_num_tokens]
- else:
- init_embeddings = tf.concat(
- [old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim :]], axis=0
- )
- new_embeddings.embeddings.assign(init_embeddings)
- return new_embeddings
-
- def prune_heads(self, heads_to_prune):
- """
- Prunes heads of the base model.
-
- Arguments:
- heads_to_prune (`Dict[int, List[int]]`):
- Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
- to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
- layer 1 and heads 2 and 3 on layer 2.
- """
- raise NotImplementedError
-
- def save_pretrained(
- self,
- save_directory,
- saved_model=False,
- version=1,
- push_to_hub=False,
- signatures=None,
- max_shard_size: Union[int, str] = "10GB",
- create_pr: bool = False,
- safe_serialization: bool = False,
- **kwargs,
- ):
- """
- Save a model and its configuration file to a directory, so that it can be re-loaded using the
- [`~TFPreTrainedModel.from_pretrained`] class method.
-
- Arguments:
- save_directory (`str`):
- Directory to which to save. Will be created if it doesn't exist.
- saved_model (`bool`, *optional*, defaults to `False`):
- If the model has to be saved in saved model format as well or not.
- version (`int`, *optional*, defaults to 1):
- The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
- TensorFlow Serving as detailed in the official documentation
- https://www.tensorflow.org/tfx/serving/serving_basic
- push_to_hub (`bool`, *optional*, defaults to `False`):
- Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
- repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
- namespace).
- signatures (`dict` or `tf.function`, *optional*):
- Model's signature used for serving. This will be passed to the `signatures` argument of model.save().
- max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
- The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size
- lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
-
-
-
- If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
- which will be bigger than `max_shard_size`.
-
-
-
- create_pr (`bool`, *optional*, defaults to `False`):
- Whether or not to create a PR with the uploaded files or directly commit.
- safe_serialization (`bool`, *optional*, defaults to `False`):
- Whether to save the model using `safetensors` or the traditional TensorFlow way (that uses `h5`).
-
- kwargs:
- Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
- """
- if os.path.isfile(save_directory):
- logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
- return
-
- os.makedirs(save_directory, exist_ok=True)
-
- if push_to_hub:
- commit_message = kwargs.pop("commit_message", None)
- repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
- repo_id = self._create_repo(repo_id, **kwargs)
- files_timestamps = self._get_files_timestamps(save_directory)
-
- if saved_model:
- if signatures is None:
- if any(spec.dtype == tf.int32 for spec in self.serving.input_signature[0].values()):
- int64_spec = {
- key: tf.TensorSpec(
- shape=spec.shape, dtype=tf.int64 if spec.dtype == tf.int32 else spec.dtype, name=spec.name
- )
- for key, spec in self.serving.input_signature[0].items()
- }
- int64_serving = tf.function(self.eager_serving, input_signature=[int64_spec])
- signatures = {"serving_default": self.serving, "int64_serving": int64_serving}
- else:
- signatures = self.serving
- saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
- self.save(saved_model_dir, include_optimizer=False, signatures=signatures)
- logger.info(f"Saved model created in {saved_model_dir}")
-
- # Save configuration file
- self.config.architectures = [self.__class__.__name__[2:]]
-
- # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
- # loaded from the Hub.
- if self._auto_class is not None:
- custom_object_save(self, save_directory, config=self.config)
-
- self.config.save_pretrained(save_directory)
- if self.can_generate():
- self.generation_config.save_pretrained(save_directory)
-
- # If we save using the predefined names, we can load using `from_pretrained`
- weights_name = SAFE_WEIGHTS_NAME if safe_serialization else TF2_WEIGHTS_NAME
- output_model_file = os.path.join(save_directory, weights_name)
-
- shards, index = tf_shard_checkpoint(self.weights, max_shard_size)
-
- # Clean the folder from a previous save
- for filename in os.listdir(save_directory):
- full_filename = os.path.join(save_directory, filename)
- # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
- # in distributed settings to avoid race conditions.
- weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
- if (
- filename.startswith(weights_no_suffix)
- and os.path.isfile(full_filename)
- and filename not in shards.keys()
- ):
- os.remove(full_filename)
-
- if index is None:
- if safe_serialization:
- state_dict = {format_weight_name(w.name): w.value() for w in self.weights}
- safe_save_file(state_dict, output_model_file, metadata={"format": "tf"})
- else:
- self.save_weights(output_model_file)
- logger.info(f"Model weights saved in {output_model_file}")
- else:
- save_index_file = os.path.join(save_directory, TF2_WEIGHTS_INDEX_NAME)
- # Save the index as well
- with open(save_index_file, "w", encoding="utf-8") as index_file:
- content = json.dumps(index, indent=2, sort_keys=True) + "\n"
- index_file.write(content)
- logger.info(
- f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
- f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
- f"index located at {save_index_file}."
- )
- for shard_file, shard in shards.items():
- # Use a distinct handle name so the open file does not shadow the `shard_file` filename from the loop
- with h5py.File(os.path.join(save_directory, shard_file), mode="w") as shard_h5:
- layers = []
- for layer in sorted(shard, key=lambda x: x.name):
- if "model." in layer.name or len(layer.name.split("/")) == 1:
- layer_name = layer.name
- else:
- layer_name = "/".join(layer.name.split("/")[1:])
- param_dset = shard_h5.create_dataset(
- layer_name, layer.numpy().shape, dtype=layer.numpy().dtype
- )
- param_dset[:] = layer.numpy()
- layers.append(layer_name.encode("utf8"))
- hdf5_format.save_attributes_to_hdf5_group(shard_h5, "layer_names", layers)
-
- if push_to_hub:
- self._upload_modified_files(
- save_directory,
- repo_id,
- files_timestamps,
- commit_message=commit_message,
- token=kwargs.get("use_auth_token"),
- )
-
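- # A minimal usage sketch, assuming a placeholder directory name and shard size:
- #
- #     model.save_pretrained("./my_model", max_shard_size="2GB")
- #     reloaded = model.__class__.from_pretrained("./my_model")
-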
- @classmethod
- def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
- r"""
- Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
-
- The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
- pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
- task.
-
- The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
- weights are discarded.
-
- Parameters:
- pretrained_model_name_or_path (`str`, *optional*):
- Can be either:
-
- - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
- user or organization name, like `dbmdz/bert-base-german-cased`.
- - A path to a *directory* containing model weights saved using
- [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
- case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
- argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
- using the provided conversion scripts and loading the TensorFlow model afterwards.
- - `None` if you are both providing the configuration and state dictionary (resp. with keyword
- arguments `config` and `state_dict`).
- model_args (sequence of positional arguments, *optional*):
- All remaining positional arguments will be passed to the underlying model's `__init__` method.
- config (`Union[PretrainedConfig, str]`, *optional*):
- Can be either:
-
- - an instance of a class derived from [`PretrainedConfig`],
- - a string valid as input to [`~PretrainedConfig.from_pretrained`].
-
- Configuration for the model to use instead of an automatically loaded configuration. Configuration can
- be automatically loaded when:
-
- - The model is a model provided by the library (loaded with the *model id* string of a pretrained
- model).
- - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the
- save directory.
- - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
- configuration JSON file named *config.json* is found in the directory.
- from_pt (`bool`, *optional*, defaults to `False`):
- Load the model weights from a PyTorch state_dict save file (see docstring of
- `pretrained_model_name_or_path` argument).
- ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
- Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
- as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
- checkpoint with 3 labels).
- cache_dir (`str`, *optional*):
- Path to a directory in which a downloaded pretrained model configuration should be cached if the
- standard cache should not be used.
- force_download (`bool`, *optional*, defaults to `False`):
- Whether or not to force the (re-)download of the model weights and configuration files, overriding the
- cached versions if they exist.
- resume_download (`bool`, *optional*, defaults to `False`):
- Whether or not to delete incompletely received files. Will attempt to resume the download if such a
- file exists.
- proxies (`Dict[str, str]`, *optional*):
- A dictionary of proxy servers to use by protocol or endpoint, e.g.,
- `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
- output_loading_info (`bool`, *optional*, defaults to `False`): Whether or not to also return a
- dictionary containing missing keys, unexpected keys and error messages.
- local_files_only (`bool`, *optional*, defaults to `False`):
- Whether or not to only look at local files (e.g., not try downloading the model).
- use_auth_token (`str` or `bool`, *optional*):
- The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
- the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
- revision (`str`, *optional*, defaults to `"main"`):
- The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
- git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
- identifier allowed by git.
-
-
-
-
- To test a pull request you made on the Hub, you can pass `revision="refs/pr/"`.
-
-
-
- mirror (`str`, *optional*):
- Mirror source to accelerate downloads in China. If you are from China and have an accessibility
- problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
- Please refer to the mirror site for more information.
- subfolder (`str`, *optional*, defaults to `""`):
- In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
- specify the folder name here.
- tf_to_pt_weight_rename (`Callable`, *optional*):
- A function that is called to transform the names of weights during the PyTorch to TensorFlow
- crossloading process. This is not necessary for most models, but is useful to allow composite models to
- be crossloaded correctly.
- kwargs (remaining dictionary of keyword arguments, *optional*):
- Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
- `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
- automatically loaded:
-
- - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
- underlying model's `__init__` method (we assume all relevant updates to the configuration have
- already been done)
- - If a configuration is not provided, `kwargs` will be first passed to the configuration class
- initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
- corresponds to a configuration attribute will be used to override said attribute with the
- supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
- will be passed to the underlying model's `__init__` function.
-
- Examples:
-
- ```python
- >>> from transformers import BertConfig, TFBertModel
-
- >>> # Download model and configuration from huggingface.co and cache.
- >>> model = TFBertModel.from_pretrained("bert-base-uncased")
- >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
- >>> model = TFBertModel.from_pretrained("./test/saved_model/")
- >>> # Update configuration during loading.
- >>> model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True)
- >>> assert model.config.output_attentions == True
- >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
- >>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json")
- >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config)
- ```"""
- config = kwargs.pop("config", None)
- cache_dir = kwargs.pop("cache_dir", None)
- from_pt = kwargs.pop("from_pt", False)
- ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
- force_download = kwargs.pop("force_download", False)
- resume_download = kwargs.pop("resume_download", False)
- proxies = kwargs.pop("proxies", None)
- output_loading_info = kwargs.pop("output_loading_info", False)
- local_files_only = kwargs.pop("local_files_only", False)
- use_auth_token = kwargs.pop("use_auth_token", None)
- revision = kwargs.pop("revision", None)
- trust_remote_code = kwargs.pop("trust_remote_code", None)
- _ = kwargs.pop("mirror", None)
- load_weight_prefix = kwargs.pop("load_weight_prefix", None)
- from_pipeline = kwargs.pop("_from_pipeline", None)
- from_auto_class = kwargs.pop("_from_auto", False)
- subfolder = kwargs.pop("subfolder", "")
- commit_hash = kwargs.pop("_commit_hash", None)
- tf_to_pt_weight_rename = kwargs.pop("tf_to_pt_weight_rename", None)
-
- if trust_remote_code is True:
- logger.warning(
- "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
- " ignored."
- )
-
- user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
- if from_pipeline is not None:
- user_agent["using_pipeline"] = from_pipeline
-
- if is_offline_mode() and not local_files_only:
- logger.info("Offline mode: forcing local_files_only=True")
- local_files_only = True
-
- # Load config if we don't provide a configuration
- if not isinstance(config, PretrainedConfig):
- config_path = config if config is not None else pretrained_model_name_or_path
- config, model_kwargs = cls.config_class.from_pretrained(
- config_path,
- cache_dir=cache_dir,
- return_unused_kwargs=True,
- force_download=force_download,
- resume_download=resume_download,
- proxies=proxies,
- local_files_only=local_files_only,
- use_auth_token=use_auth_token,
- revision=revision,
- _from_auto=from_auto_class,
- _from_pipeline=from_pipeline,
- _commit_hash=commit_hash,
- **kwargs,
- )
- else:
- model_kwargs = kwargs
-
- if commit_hash is None:
- commit_hash = getattr(config, "_commit_hash", None)
-
- # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
- # index of the files.
- is_sharded = False
- # Load model
- if pretrained_model_name_or_path is not None:
- pretrained_model_name_or_path = str(pretrained_model_name_or_path)
- is_local = os.path.isdir(pretrained_model_name_or_path)
- if is_local:
- if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
- # Load from a PyTorch checkpoint in priority if from_pt
- archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
- elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)):
- # Load from a sharded PyTorch checkpoint
- archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)
- is_sharded = True
- elif is_safetensors_available() and os.path.isfile(
- os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
- ):
- # Load from a safetensors checkpoint
- archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
- elif is_safetensors_available() and os.path.isfile(
- os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
- ):
- # Load from a sharded safetensors checkpoint
- archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
- is_sharded = True
- raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!")
- elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
- # Load from a TF 2.0 checkpoint
- archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
- elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)):
- # Load from a sharded TF 2.0 checkpoint
- archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)
- is_sharded = True
- # At this stage we don't have a weight file so we will raise an error.
- elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile(
- os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)
- ):
- raise EnvironmentError(
- f"Error no file named {TF2_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
- "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those "
- "weights."
- )
- else:
- raise EnvironmentError(
- f"Error no file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
- f"{pretrained_model_name_or_path}."
- )
- elif os.path.isfile(pretrained_model_name_or_path):
- archive_file = pretrained_model_name_or_path
- is_local = True
- elif os.path.isfile(pretrained_model_name_or_path + ".index"):
- archive_file = pretrained_model_name_or_path + ".index"
- is_local = True
- elif is_remote_url(pretrained_model_name_or_path):
- filename = pretrained_model_name_or_path
- resolved_archive_file = download_url(pretrained_model_name_or_path)
- else:
- # set correct filename
- if from_pt:
- filename = WEIGHTS_NAME
- elif is_safetensors_available():
- filename = SAFE_WEIGHTS_NAME
- else:
- filename = TF2_WEIGHTS_NAME
-
- try:
- # Load from URL or cache if already cached
- cached_file_kwargs = {
- "cache_dir": cache_dir,
- "force_download": force_download,
- "proxies": proxies,
- "resume_download": resume_download,
- "local_files_only": local_files_only,
- "use_auth_token": use_auth_token,
- "user_agent": user_agent,
- "revision": revision,
- "subfolder": subfolder,
- "_raise_exceptions_for_missing_entries": False,
- "_commit_hash": commit_hash,
- }
- resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
-
- # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
- # result when internet is up, the repo and revision exist, but the file does not.
- if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME:
- # Maybe the checkpoint is sharded, we try to grab the index name in this case.
- resolved_archive_file = cached_file(
- pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **cached_file_kwargs
- )
- if resolved_archive_file is not None:
- is_sharded = True
- raise NotImplementedError(
- "Support for sharded checkpoints using safetensors is coming soon!"
- )
- else:
- # This repo has no safetensors file of any kind, we switch to TensorFlow.
- filename = TF2_WEIGHTS_NAME
- resolved_archive_file = cached_file(
- pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs
- )
- if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME:
- # Maybe the checkpoint is sharded, we try to grab the index name in this case.
- resolved_archive_file = cached_file(
- pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs
- )
- if resolved_archive_file is not None:
- is_sharded = True
- if resolved_archive_file is None and filename == WEIGHTS_NAME:
- # Maybe the checkpoint is sharded, we try to grab the index name in this case.
- resolved_archive_file = cached_file(
- pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs
- )
- if resolved_archive_file is not None:
- is_sharded = True
- if resolved_archive_file is None:
- # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error
- # message.
- has_file_kwargs = {
- "revision": revision,
- "proxies": proxies,
- "use_auth_token": use_auth_token,
- }
- if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
- raise EnvironmentError(
- f"{pretrained_model_name_or_path} does not appear to have a file named"
- f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to"
- " load this model from those weights."
- )
- else:
- raise EnvironmentError(
- f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME},"
- f" {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}"
- )
-
- except EnvironmentError:
- # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
- # to the original exception.
- raise
- except Exception:
- # For any other exception, we throw a generic error.
-
- raise EnvironmentError(
- f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
- " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
- f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
- f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}"
- )
- if is_local:
- logger.info(f"loading weights file {archive_file}")
- resolved_archive_file = archive_file
- filename = resolved_archive_file.split(os.path.sep)[-1]
- else:
- logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
- else:
- resolved_archive_file = None
-
- # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
- if is_sharded:
- # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
- resolved_archive_file, _ = get_checkpoint_shard_files(
- pretrained_model_name_or_path,
- resolved_archive_file,
- cache_dir=cache_dir,
- force_download=force_download,
- proxies=proxies,
- resume_download=resume_download,
- local_files_only=local_files_only,
- use_auth_token=use_auth_token,
- user_agent=user_agent,
- revision=revision,
- _commit_hash=commit_hash,
- )
-
- safetensors_from_pt = False
- if filename == SAFE_WEIGHTS_NAME:
- with safe_open(resolved_archive_file, framework="tf") as f:
- safetensors_metadata = f.metadata()
- if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]:
- raise OSError(
- f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata."
- " Make sure you save your model with the `save_pretrained` method."
- )
- safetensors_from_pt = safetensors_metadata.get("format") == "pt"
-
- config.name_or_path = pretrained_model_name_or_path
-
- # composed models, *e.g.* TFRag, require special treatment when it comes to loading
- # pre-trained weights.
- if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
- model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
-
- # Instantiate model.
- model = cls(config, *model_args, **model_kwargs)
-
- if from_pt:
- from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
-
- # Load from a PyTorch checkpoint
- return load_pytorch_checkpoint_in_tf2_model(
- model,
- resolved_archive_file,
- allow_missing_keys=True,
- output_loading_info=output_loading_info,
- _prefix=load_weight_prefix,
- tf_to_pt_weight_rename=tf_to_pt_weight_rename,
- )
-
- # we might need to extend the variable scope for composite models
- if load_weight_prefix is not None:
- with tf.compat.v1.variable_scope(load_weight_prefix):
- model(model.dummy_inputs) # build the network with dummy inputs
- else:
- model(model.dummy_inputs) # build the network with dummy inputs
-
- if safetensors_from_pt:
- from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model
-
- state_dict = safe_load_file(resolved_archive_file)
- # Load from a PyTorch checkpoint
- return load_pytorch_state_dict_in_tf2_model(
- model,
- state_dict,
- allow_missing_keys=True,
- output_loading_info=output_loading_info,
- _prefix=load_weight_prefix,
- )
-
- # 'by_name' allow us to do transfer learning by skipping/adding layers
- # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
- try:
- if is_sharded:
- for file in resolved_archive_file:
- assert os.path.isfile(file), f"Error retrieving files {file}"
-
- missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights(
- model,
- resolved_archive_file,
- ignore_mismatched_sizes=ignore_mismatched_sizes,
- _prefix=load_weight_prefix,
- )
- else:
- missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
- model,
- resolved_archive_file,
- ignore_mismatched_sizes=ignore_mismatched_sizes,
- _prefix=load_weight_prefix,
- )
- except OSError as e:
- try:
- with open(resolved_archive_file) as f:
- if f.read().startswith("version"):
- raise OSError(
- "You seem to have cloned a repository without having git-lfs installed. Please install "
- "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
- "you cloned."
- )
- else:
- raise ValueError from e
- except (UnicodeDecodeError, ValueError):
- raise OSError(
- "Unable to load weights from h5 file. "
- "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
- )
-
- model(model.dummy_inputs) # Make sure restore ops are run
-
- if cls._keys_to_ignore_on_load_missing is not None:
- for pat in cls._keys_to_ignore_on_load_missing:
- missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
-
- if cls._keys_to_ignore_on_load_unexpected is not None:
- for pat in cls._keys_to_ignore_on_load_unexpected:
- unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
-
- if len(unexpected_keys) > 0:
- logger.warning(
- f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when"
- f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
- f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
- " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
- " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
- f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
- " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
- )
- else:
- logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
-
- if len(missing_keys) > 0:
- logger.warning(
- f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at"
- f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
- " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
- )
- elif len(mismatched_keys) == 0:
- logger.warning(
- f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at"
- f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
- f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
- " training."
- )
- if len(mismatched_keys) > 0:
- mismatched_warning = "\n".join(
- [
- f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
- for key, shape1, shape2 in mismatched_keys
- ]
- )
- logger.warning(
- f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
- f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
- f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
- " to use it for predictions and inference."
- )
-
- # If it is a model with generation capabilities, attempt to load the generation config
- if model.can_generate():
- try:
- model.generation_config = GenerationConfig.from_pretrained(
- pretrained_model_name_or_path,
- cache_dir=cache_dir,
- force_download=force_download,
- resume_download=resume_download,
- proxies=proxies,
- local_files_only=local_files_only,
- use_auth_token=use_auth_token,
- revision=revision,
- subfolder=subfolder,
- _from_auto=from_auto_class,
- _from_pipeline=from_pipeline,
- **kwargs,
- )
- except OSError:
- logger.info(
- "Generation config file not found, using a generation config created from the model config."
- )
- pass
-
- if output_loading_info:
- loading_info = {
- "missing_keys": missing_keys,
- "unexpected_keys": unexpected_keys,
- "mismatched_keys": mismatched_keys,
- }
-
- return model, loading_info
-
- return model
-
- def push_to_hub(
- self,
- repo_id: str,
- use_temp_dir: Optional[bool] = None,
- commit_message: Optional[str] = None,
- private: Optional[bool] = None,
- max_shard_size: Optional[Union[int, str]] = "10GB",
- use_auth_token: Optional[Union[bool, str]] = None,
- create_pr: bool = False,
- **base_model_card_args,
- ) -> str:
- """
- Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_id`.
-
- Parameters:
- repo_id (`str`):
- The name of the repository you want to push your model to. It should contain your organization name
- when pushing to a given organization.
- use_temp_dir (`bool`, *optional*):
- Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub.
- Will default to `True` if there is no directory named like `repo_id`, `False` otherwise.
- commit_message (`str`, *optional*):
- Message to commit while pushing. Will default to `"Upload model"`.
- private (`bool`, *optional*):
- Whether or not the repository created should be private.
- use_auth_token (`bool` or `str`, *optional*):
- The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
- when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
- is not specified.
- max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
- Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard
- will then be each of size lower than this size. If expressed as a string, needs to be digits followed
- by a unit (like `"5MB"`).
- create_pr (`bool`, *optional*, defaults to `False`):
- Whether or not to create a PR with the uploaded files or directly commit.
-
- Examples:
-
- ```python
- from transformers import TFAutoModel
-
- model = TFAutoModel.from_pretrained("bert-base-cased")
-
- # Push the model to your namespace with the name "my-finetuned-bert".
- model.push_to_hub("my-finetuned-bert")
-
- # Push the model to an organization with the name "my-finetuned-bert".
- model.push_to_hub("huggingface/my-finetuned-bert")
- ```
- """
- if "repo_path_or_name" in base_model_card_args:
- warnings.warn(
- "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use "
- "`repo_id` instead."
- )
- repo_id = base_model_card_args.pop("repo_path_or_name")
- # Deprecation warning will be sent after for repo_url and organization
- repo_url = base_model_card_args.pop("repo_url", None)
- organization = base_model_card_args.pop("organization", None)
-
- if os.path.isdir(repo_id):
- working_dir = repo_id
- repo_id = repo_id.split(os.path.sep)[-1]
- else:
- working_dir = repo_id.split("/")[-1]
-
- repo_id = self._create_repo(
- repo_id, private=private, use_auth_token=use_auth_token, repo_url=repo_url, organization=organization
- )
-
- if use_temp_dir is None:
- use_temp_dir = not os.path.isdir(working_dir)
-
- with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir:
- files_timestamps = self._get_files_timestamps(work_dir)
-
- # Save all files.
- self.save_pretrained(work_dir, max_shard_size=max_shard_size)
- if hasattr(self, "history") and hasattr(self, "create_model_card"):
- # This is a Keras model and we might be able to fish out its History and make a model card out of it
-                model_card_kwargs = {
-                    "output_dir": work_dir,
-                    "model_name": Path(repo_id).name,
-                }
-                # Merge in any extra model-card arguments passed by the caller.
-                model_card_kwargs.update(base_model_card_args)
-                self.create_model_card(**model_card_kwargs)
-
- self._upload_modified_files(
- work_dir,
- repo_id,
- files_timestamps,
- commit_message=commit_message,
- token=use_auth_token,
- create_pr=create_pr,
- )
-
- @classmethod
- def register_for_auto_class(cls, auto_class="TFAutoModel"):
- """
- Register this class with a given auto class. This should only be used for custom models as the ones in the
- library are already mapped with an auto class.
-
-
-        This API is experimental and may have some slight breaking changes in the next releases.
-
- Args:
- auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`):
- The auto class to register this new model with.
- """
- if not isinstance(auto_class, str):
- auto_class = auto_class.__name__
-
- import transformers.models.auto as auto_module
-
- if not hasattr(auto_module, auto_class):
- raise ValueError(f"{auto_class} is not a valid auto class.")
-
- cls._auto_class = auto_class
-
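A minimal, hypothetical sketch of how a custom model might use this registration hook (`MyCustomConfig` and `MyCustomTFModel` are illustrative names, not library classes):

```python
from transformers import PretrainedConfig, TFPreTrainedModel

class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom-model"

class MyCustomTFModel(TFPreTrainedModel):
    config_class = MyCustomConfig

# Record that this custom class should be exposed through TFAutoModel.
MyCustomTFModel.register_for_auto_class("TFAutoModel")
```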
-
-class TFConv1D(tf.keras.layers.Layer):
- """
- 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
-
- Basically works like a linear layer but the weights are transposed.
-
- Args:
- nf (`int`):
- The number of output features.
- nx (`int`):
- The number of input features.
- initializer_range (`float`, *optional*, defaults to 0.02):
- The standard deviation to use to initialize the weights.
- kwargs:
- Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
- """
-
- def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
- super().__init__(**kwargs)
- self.nf = nf
- self.nx = nx
- self.initializer_range = initializer_range
-
- def build(self, input_shape):
- self.weight = self.add_weight(
- "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
- )
- self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
-
- def call(self, x):
- bz, sl = shape_list(x)[:2]
-
- x = tf.reshape(x, [-1, self.nx])
- x = tf.matmul(x, self.weight) + self.bias
-
- x = tf.reshape(x, [bz, sl, self.nf])
-
- return x
-
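A brief, hypothetical usage sketch of `TFConv1D` (shapes are chosen purely for illustration):

```python
import tensorflow as tf

# Maps the last dimension from nx=768 to nf=3072; behaves like a Dense layer whose
# kernel is stored transposed with shape [nx, nf].
layer = TFConv1D(nf=3072, nx=768)
x = tf.random.normal((2, 10, 768))  # [batch, sequence, nx]
y = layer(x)                        # [batch, sequence, nf] -> (2, 10, 3072)
```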
-
-class TFSharedEmbeddings(tf.keras.layers.Layer):
- r"""
- Construct shared token embeddings.
-
-    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
- modeling.
-
- Args:
- vocab_size (`int`):
- The size of the vocabulary, e.g., the number of unique tokens.
- hidden_size (`int`):
- The size of the embedding vectors.
- initializer_range (`float`, *optional*):
- The standard deviation to use when initializing the weights. If no value is provided, it will default to
- \\(1/\sqrt{hidden\_size}\\).
- kwargs:
- Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
- """
-    # TODO (joao): flagged for deletion due to embeddings refactor
-
- def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
- super().__init__(**kwargs)
- self.vocab_size = vocab_size
- self.hidden_size = hidden_size
- self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range
-
- def build(self, input_shape):
- """
- Build shared token embedding layer Shared weights logic adapted from
- https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
- """
- self.weight = self.add_weight(
- "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
- )
- super().build(input_shape)
-
- def get_config(self):
- config = {
- "vocab_size": self.vocab_size,
- "hidden_size": self.hidden_size,
- "initializer_range": self.initializer_range,
- }
- base_config = super().get_config()
-
- return dict(list(base_config.items()) + list(config.items()))
-
- def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
- """
- Get token embeddings of inputs or decode final hidden state.
-
- Args:
- inputs (`tf.Tensor`):
- In embedding mode, should be an int64 tensor with shape `[batch_size, length]`.
-
- In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`.
- mode (`str`, defaults to `"embedding"`):
- A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be
- used as an embedding layer, the second one that the layer should be used as a linear decoder.
-
- Returns:
- `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length,
- embedding_size]`.
-
-        In linear mode, the output is a float32 tensor with shape `[batch_size, length, vocab_size]`.
-
- Raises:
- ValueError: if `mode` is not valid.
-
- Shared weights logic is adapted from
- [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24).
- """
- if mode == "embedding":
- return self._embedding(inputs)
- elif mode == "linear":
- return self._linear(inputs)
- else:
- raise ValueError(f"mode {mode} is not valid.")
-
- def _embedding(self, input_ids):
- """Applies embedding based on inputs tensor."""
- return tf.gather(self.weight, input_ids)
-
- def _linear(self, inputs):
- """
- Computes logits by running inputs through a linear layer.
-
- Args:
- inputs: A float32 tensor with shape [..., hidden_size]
-
- Returns:
- float32 tensor with shape [..., vocab_size].
- """
- first_dims = shape_list(inputs)[:-1]
- x = tf.reshape(inputs, [-1, self.hidden_size])
- logits = tf.matmul(x, self.weight, transpose_b=True)
-
- return tf.reshape(logits, first_dims + [self.vocab_size])
-
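A short, hypothetical sketch of the shared-weight pattern `TFSharedEmbeddings` implements (vocabulary and sizes are illustrative):

```python
import tensorflow as tf

emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
ids = tf.constant([[1, 2, 3]])        # [batch, length] int token ids
hidden = emb(ids, mode="embedding")   # [1, 3, 16] embeddings
logits = emb(hidden, mode="linear")   # [1, 3, 100] vocab logits from the same weight matrix
```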
-
-class TFSequenceSummary(tf.keras.layers.Layer):
- """
- Compute a single vector summary of a sequence hidden states.
-
- Args:
- config ([`PretrainedConfig`]):
- The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
- config class of your model for the default values it uses):
-
- - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
-
- - `"last"` -- Take the last token hidden state (like XLNet)
- - `"first"` -- Take the first token hidden state (like Bert)
- - `"mean"` -- Take the mean of all tokens hidden states
- - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- - `"attn"` -- Not implemented now, use multi-head attention
-
- - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
- (otherwise to `config.hidden_size`).
- - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
- another string or `None` will add no activation.
- - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
-            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
-
- initializer_range (`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
- kwargs:
- Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
- """
-
- def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
- super().__init__(**kwargs)
-
-        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
- if self.summary_type == "attn":
- # We should use a standard multi-head attention module with absolute positional embedding for that.
- # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
- # We can probably just use the multi-head attention module of PyTorch >=1.1.0
- raise NotImplementedError
-
- self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
- if self.has_summary:
- if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
- num_classes = config.num_labels
- else:
- num_classes = config.hidden_size
- self.summary = tf.keras.layers.Dense(
- num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
- )
-
- self.has_activation = False
- activation_string = getattr(config, "summary_activation", None)
- if activation_string is not None:
- self.has_activation = True
- self.activation = get_tf_activation(activation_string)
-
- self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
- if self.has_first_dropout:
- self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
-
- self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
- if self.has_last_dropout:
- self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
-
- def call(self, inputs, cls_index=None, training=False):
- if not isinstance(inputs, (dict, tuple, list)):
- hidden_states = inputs
- elif isinstance(inputs, (tuple, list)):
- hidden_states = inputs[0]
- cls_index = inputs[1] if len(inputs) > 1 else None
- assert len(inputs) <= 2, "Too many inputs."
- else:
- hidden_states = inputs.get("hidden_states")
- cls_index = inputs.get("cls_index", None)
-
- if self.summary_type == "last":
- output = hidden_states[:, -1]
- elif self.summary_type == "first":
- output = hidden_states[:, 0]
- elif self.summary_type == "mean":
- output = tf.reduce_mean(hidden_states, axis=1)
- elif self.summary_type == "cls_index":
- hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
- if cls_index is None:
- cls_index = tf.fill(
- hidden_shape[:-2], hidden_shape[-2] - 1
- ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length
- cls_shape = shape_list(cls_index)
- if len(cls_shape) <= len(hidden_shape) - 2:
- cls_index = tf.expand_dims(cls_index, axis=-1)
- # else:
- # cls_index = cls_index[..., tf.newaxis]
- # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
- # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
- output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
- output = tf.squeeze(
- output, axis=len(hidden_shape) - 2
- ) # shape of output: (batch, num choices, hidden_size)
- elif self.summary_type == "attn":
- raise NotImplementedError
-
- if self.has_first_dropout:
- output = self.first_dropout(output, training=training)
-
- if self.has_summary:
- output = self.summary(output)
-
- if self.has_activation:
- output = self.activation(output)
-
- if self.has_last_dropout:
- output = self.last_dropout(output, training=training)
-
- return output
-
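A hypothetical usage sketch of `TFSequenceSummary` with a BERT-style `"first"` summary and a tanh projection (the config values are illustrative, not the defaults of any particular model):

```python
import tensorflow as tf
from transformers import PretrainedConfig

config = PretrainedConfig(
    summary_type="first",
    summary_use_proj=True,
    summary_proj_to_labels=False,  # project to hidden_size rather than num_labels
    summary_activation="tanh",
    hidden_size=16,
)
summary = TFSequenceSummary(config, initializer_range=0.02)
hidden_states = tf.random.normal((2, 5, 16))  # [batch, sequence, hidden]
pooled = summary(hidden_states)               # [batch, hidden] -> (2, 16)
```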
-
-def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
- """
- Creates a `tf.initializers.TruncatedNormal` with the given range.
-
- Args:
-        initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.
-
- Returns:
- `tf.initializers.TruncatedNormal`: The truncated normal initializer.
- """
- return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py
deleted file mode 100644
index 742bc64ce037a53a765efc80ed773b840af5b4c7..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/svgLib/path/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from fontTools.pens.transformPen import TransformPen
-from fontTools.misc import etree
-from fontTools.misc.textTools import tostr
-from .parser import parse_path
-from .shapes import PathBuilder
-
-
-__all__ = [tostr(s) for s in ("SVGPath", "parse_path")]
-
-
-class SVGPath(object):
- """Parse SVG ``path`` elements from a file or string, and draw them
- onto a glyph object that supports the FontTools Pen protocol.
-
- For example, reading from an SVG file and drawing to a Defcon Glyph:
-
- import defcon
- glyph = defcon.Glyph()
- pen = glyph.getPen()
- svg = SVGPath("path/to/a.svg")
- svg.draw(pen)
-
- Or reading from a string containing SVG data, using the alternative
- 'fromstring' (a class method):
-
- data = ' (
- bytes
- | tempfile._TemporaryFileWrapper
- | list[bytes | tempfile._TemporaryFileWrapper]
- | None
- ):
- """
- Parameters:
- x: List of JSON objects with filename as 'name' property and base64 data as 'data' property
- Returns:
- File objects in requested format
- """
- if x is None:
- return None
-
- def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
- file_name, data, is_file = (
- f["name"],
- f["data"],
- f.get("is_file", False),
- )
- if self.type == "file":
- if is_file:
- path = self.make_temp_copy_if_needed(file_name)
- else:
- data, _ = client_utils.decode_base64_to_binary(data)
- path = self.file_bytes_to_file(
- data, dir=self.DEFAULT_TEMP_DIR, file_name=file_name
- )
- path = str(utils.abspath(path))
- self.temp_files.add(path)
-
- # Creation of tempfiles here
- file = tempfile.NamedTemporaryFile(
- delete=False, dir=self.DEFAULT_TEMP_DIR
- )
- file.name = path
- file.orig_name = file_name # type: ignore
- return file
- elif (
- self.type == "binary" or self.type == "bytes"
- ): # "bytes" is included for backwards compatibility
- if is_file:
- with open(file_name, "rb") as file_data:
- return file_data.read()
- return client_utils.decode_base64_to_binary(data)[0]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'file', 'bytes'."
- )
-
- if self.file_count == "single":
- if isinstance(x, list):
- return process_single_file(x[0])
- else:
- return process_single_file(x)
- else:
- if isinstance(x, list):
- return [process_single_file(f) for f in x]
- else:
- return process_single_file(x)
-
- def postprocess(
- self, y: str | list[str] | None
- ) -> dict[str, Any] | list[dict[str, Any]] | None:
- """
- Parameters:
- y: file path
- Returns:
- JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes
- """
- if y is None:
- return None
- if isinstance(y, list):
- return [
- {
- "orig_name": Path(file).name,
- "name": self.make_temp_copy_if_needed(file),
- "size": Path(file).stat().st_size,
- "data": None,
- "is_file": True,
- }
- for file in y
- ]
- else:
- d = {
- "orig_name": Path(y).name,
- "name": self.make_temp_copy_if_needed(y),
- "size": Path(y).stat().st_size,
- "data": None,
- "is_file": True,
- }
- return d
-
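For reference, a hypothetical example of the dictionary shape this `postprocess` step produces for a single file; all values are made up, and `name` points at the temp copy created by `make_temp_copy_if_needed`:

```python
payload = {
    "orig_name": "report.pdf",
    "name": "/tmp/gradio/1a2b3c/report.pdf",
    "size": 48213,
    "data": None,
    "is_file": True,
}
```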
- def as_example(self, input_data: str | list | None) -> str:
- if input_data is None:
- return ""
- elif isinstance(input_data, list):
- return ", ".join([Path(file).name for file in input_data])
- else:
- return Path(input_data).name
-
- def api_info(self) -> dict[str, dict | bool]:
- if self.file_count == "single":
- return self._single_file_api_info()
- else:
- return self._multiple_file_api_info()
-
- def serialized_info(self):
- if self.file_count == "single":
- return self._single_file_serialized_info()
- else:
- return self._multiple_file_serialized_info()
-
- def example_inputs(self) -> dict[str, Any]:
- if self.file_count == "single":
- return self._single_file_example_inputs()
- else:
- return self._multiple_file_example_inputs()
diff --git a/spaces/cihyFjudo/fairness-paper-search/Cheat Permata Trainstation Facebook How to Enjoy the Game for a Long Time.md b/spaces/cihyFjudo/fairness-paper-search/Cheat Permata Trainstation Facebook How to Enjoy the Game for a Long Time.md
deleted file mode 100644
index 60b147e105444b8b8ffdcc9548172c805cc34400..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Cheat Permata Trainstation Facebook How to Enjoy the Game for a Long Time.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Laquila Solitaria 2 Full Movie In English Free Download PORTABLE.md b/spaces/cihyFjudo/fairness-paper-search/Laquila Solitaria 2 Full Movie In English Free Download PORTABLE.md
deleted file mode 100644
index 133f859e130f9209eef0f5f5bb016bbc885cfed8..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Laquila Solitaria 2 Full Movie In English Free Download PORTABLE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
L'aquila solitaria 2 full movie in english free download
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Serial Number In Sql Server Query Timeout What You Need to Know and Why It Matters.md b/spaces/cihyFjudo/fairness-paper-search/Serial Number In Sql Server Query Timeout What You Need to Know and Why It Matters.md
deleted file mode 100644
index 7236f8bb5f3c86cd124fce6cf64973a6408c6b4a..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Serial Number In Sql Server Query Timeout What You Need to Know and Why It Matters.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
System.Data.SqlClient.SqlException (0x80131904): Connection Timeout Expired. The timeout period elapsed while attempting to consume the pre-login handshake acknowledgment. This could be because the pre-login handshake failed or the server was unable to respond back in time. The duration spent while attempting to connect to this server was [Pre-Login] initialization=23; handshake=14979; System.ComponentModel.Win32Exception (0x80004005): The wait operation timed out.
System.Data.SqlClient.SqlException (0x80131904): Timeout expired. The timeout period elapsed prior to completion of the operation or the server is not responding. System.ComponentModel.Win32Exception (0x80004005): The wait operation timed out.
-
Connection Timeout Expired. The timeout period elapsed while attempting to consume the pre-login handshake acknowledgment. This could be because the pre-login handshake failed or the server was unable to respond back in time. The duration spent while attempting to connect to this server was [Pre-Login] initialization=21036; handshake=0; (Microsoft SQL Server, Error: -2).
-
A query time-out is different from a connection time-out or login time-out. The connection or login timeout occurs when the initial connection to the database server reaches a predefined time-out period. At this stage, no query has been submitted to the server. These are examples of connection or login time-out error messages:
-
Connection Timeout Expired. The timeout period elapsed while attempting to consume the pre-login handshake acknowledgment. This could be because the pre-login handshake failed or the server was unable to respond back in time. The duration spent while attempting to connect to this server was [Pre-Login] initialization=23; handshake=14979;
-
The connection time-out value is a client-side setting and is typically set to 15 seconds. For more information about how to troubleshoot connection time-out, see troubleshoot connection timeout. For query timeout troubleshooting, watch this video.
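A minimal sketch of setting both timeouts from client code using pyodbc; this is illustrative only, and the DSN, credentials, and values are placeholders. In pyodbc, the `timeout` keyword on `connect()` controls the login/connection timeout, while the connection's `timeout` attribute controls the query timeout.

```python
import pyodbc

conn = pyodbc.connect("DSN=MySqlServer;UID=app_user;PWD=secret", timeout=15)  # connection/login timeout (seconds)
conn.timeout = 30  # query timeout (seconds) for statements run on this connection
cursor = conn.cursor()
cursor.execute("SELECT 1")
print(cursor.fetchone())
```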
-
The user should receive a message indicating the BSFN has timed out and the transaction was rolled back.
The issue can be reproduced at will with the following steps: 1) P4101\P41026. Create a new serial controlled item and set to 9 to assign serial number manually.
-
-
The maximum number of rows that the system can return in a non-chunked query. The default setting (0) allows for an unlimited number of rows. If the query results exceed a specified value, then InfluxDB includes a "partial":true tag in the response body.
-
There are two configurable timeout options that affect the execution of remote queries. The error messages occur when a query exceeds the timeout option values. Refer to the "More Information" section of this article for further details about the timeout options.
-
For more information about the remote login timeout setting and where IDBInitialize::Initialize is called, refer to the "Connecting to an OLE DB Provider" topic in MSDN or the Microsoft SQL Server 7.0 Resource Guide in the BackOffice Resource Kit.
You may also refer to the following topics in MSDN for a description of how the query processor interacts with an OLE DB provider to enable distributed and heterogeneous queries:
-
The first error message pertains to the IDBInitialize::Initialize method.
When the query attempts to establish a connection to the remote server, the first error message occurs if the time it takes the query to establish a connection exceeds the remote login timeout option value.
The second error message pertains to the ICommandText::Execute method.
This message indicates that the query took more time to process than the time that is specified in the remote query timeout configuration setting.
By default, in Microsoft SQL Server 7.0, the timeout setting is zero (0 - infinite wait). By default, in SQL Server 2000 and in SQL Server 2005, the timeout setting is 600 (10 minutes).
-
When troubleshooting DNS issues, it is useful to have access to Domain Name System (DNS) records of a website. All mainstream operating systems have tools that enable users to query a web server and receive important information such as IP addresses and other pieces of domain-related information.
-
Start of Authority (SOA) records provide authoritative information about the domain and the server, such as the email address of the administrator, serial number, refresh interval, query expiration time, etc.
-
Note that I have two separate sqlcommands and also two separate connections to handle this. The reason is that with sqltransaction it doesn't let me query the same table. I think the timeout may be happening because of this?
-
Prepared statements are a PostgreSQL feature that can be used to optimize theperformance of queries that are executed more than once. When a queryis prepared by a call to Connection.prepare(), the server parses,analyzes and compiles the query allowing to reuse that work once there isa need to run the same query again.
-
Maximum number of rows to return, specified as the comma-separated pair consisting of 'MaxRows' and a positive numeric scalar. By default, the select function returns all rows from the executed SQL query. Use this name-value pair argument to limit the number of rows imported into MATLAB®.
-
SQL query timeout, specified as the comma-separated pair consistingof 'QueryTimeOut' and a positive numeric scalar.By default, the select function ignores the timeoutvalue. Use this name-value pair argument to specify the number ofseconds to wait for executing the SQL query selectquery.
-
The default number of partitions to use when shuffling data for joins or aggregations. Note: For structured streaming, this configuration cannot be changed between query restarts from the same checkpoint location.
-
Set a query duration timeout in seconds in Thrift Server. If the timeout is set to a positive value, a running query will be cancelled automatically when the timeout is exceeded, otherwise the query continues to run till completion. If timeout values are set for each statement via java.sql.Statement.setQueryTimeout and they are smaller than this configuration value, they take precedence. If you set this timeout and prefer to cancel the queries right away without waiting task to finish, consider enabling spark.sql.thriftServer.interruptOnCancel together.
-
Open transactions hold the locks on rows affected by the transaction until they are committed or rolled back, and any other write query modifying the same rows has to wait for the open transaction to release the locks. If the query has to wait for more than the lock_wait_timeout (default 60 seconds), it fails. This happens most often when the open transaction is idle and unnecessarily holding the locks.
-
If a query has failed because it waited long enough to exceed the lock_wait_timeout, identify the transaction that is causing the timeout, and kill its connection. Killing the connection rolls back the uncommitted writes of the open transaction. For example, a write operation is performed in the following transaction, but it is not committed.
-
Maximum number of seconds that each action of a query is permitted to execute before returning an error. The cumulative time may exceed this value. For JDBC statements, ColdFusion sets this attribute. For other drivers, see the driver documentation.
-
You can cache query results and execute stored procedures. For information about this and about displaying cfquery output, see the Developing ColdFusion Applications. Because the timeout attribute only affects the maximum time for each suboperation of a query, the cumulative time may exceed its value. To set a timeout for a page that might get a very large result set, set the Administrator > Server Settings > Timeout Requests option to an appropriate value or use the RequestTimeout attribute of the cfsetting tag (for example, ). The Caching page of the ColdFusion Administrator specifies the maximum number of cached queries. Setting this value to 0 disables query caching. You cannot use ColdFusion reserved words as query names. You cannot use SQL reserved words as variable or column names in a Query of Queries, unless they are escaped. The escape character is the bracket []; for example:
-
A cached query is a query that has its results stored in the server's memory. The results are stored when the query is first run. Whenever you run the query thereafter, ColdFusion retrieves the results from memory.
-
Special handling for "file" protocol: The file JavaMail provider can be used to read raw messages from files.The server field is used to specify the path to the parent of the folder.Individual message files should be stored with the name n.msg,where n is the message number.Alternatively, the server field can be the name of a file which contains a single message.The current implementation is quite basic, and is mainly intended for debugging purposes.
-
The throughput number represents the actual number of requests/minute the server handled. This calculationincludes any delays you added to your test and JMeter's own internal processing time. The advantageof doing the calculation like this is that this number represents somethingreal - your server in fact handled that many requests per minute, and you can increase the number of threadsand/or decrease the delays to discover your server's maximum throughput. Whereas if you made calculationsthat factored out delays and JMeter's processing, it would be unclear what you could conclude from thatnumber.
-
Our team does not usually recommend having the connection timeout set to unlimited. This causes the SSIS package to continue to use resources until the command can complete. If there are multiple SSIS package jobs and the server is unavailable for an extended period, this could cause the SSIS package jobs to pile up until the server becomes available. Nonetheless, I wanted to find a way to set it to 0 in case it was necessary. After numerous tests, I identified a workaround which did not involve manually setting the connection string or setting up a configuration file.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/TunesKit DRM Media Converter 2.8.7.155 With Crack jancah The Ultimate Solution for Removing DRM Protection.md b/spaces/cihyFjudo/fairness-paper-search/TunesKit DRM Media Converter 2.8.7.155 With Crack jancah The Ultimate Solution for Removing DRM Protection.md
deleted file mode 100644
index a2990d7f33ef00f18d5693787c9fc07eb47d499f..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/TunesKit DRM Media Converter 2.8.7.155 With Crack jancah The Ultimate Solution for Removing DRM Protection.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
TunesKit DRM Media Converter 2.8.7.155 With Crack jancah
- )
-}
diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/box_ops.py b/spaces/codelion/Grounding_DINO_demo/groundingdino/util/box_ops.py
deleted file mode 100644
index 781068d294e576954edb4bd07b6e0f30e4e1bcd9..0000000000000000000000000000000000000000
--- a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/box_ops.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-Utilities for bounding box manipulation and GIoU.
-"""
-import torch
-from torchvision.ops.boxes import box_area
-
-
-def box_cxcywh_to_xyxy(x):
- x_c, y_c, w, h = x.unbind(-1)
- b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
- return torch.stack(b, dim=-1)
-
-
-def box_xyxy_to_cxcywh(x):
- x0, y0, x1, y1 = x.unbind(-1)
- b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
- return torch.stack(b, dim=-1)
-
-
-# modified from torchvision to also return the union
-def box_iou(boxes1, boxes2):
- area1 = box_area(boxes1)
- area2 = box_area(boxes2)
-
- # import ipdb; ipdb.set_trace()
- lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
- rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
-
- wh = (rb - lt).clamp(min=0) # [N,M,2]
- inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
-
- union = area1[:, None] + area2 - inter
-
- iou = inter / (union + 1e-6)
- return iou, union
-
-
-def generalized_box_iou(boxes1, boxes2):
- """
- Generalized IoU from https://giou.stanford.edu/
-
- The boxes should be in [x0, y0, x1, y1] format
-
- Returns a [N, M] pairwise matrix, where N = len(boxes1)
- and M = len(boxes2)
- """
- # degenerate boxes gives inf / nan results
- # so do an early check
- assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
- assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
- # except:
- # import ipdb; ipdb.set_trace()
- iou, union = box_iou(boxes1, boxes2)
-
- lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
- rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
-
- wh = (rb - lt).clamp(min=0) # [N,M,2]
- area = wh[:, :, 0] * wh[:, :, 1]
-
- return iou - (area - union) / (area + 1e-6)
-
-
-# modified from torchvision to also return the union
-def box_iou_pairwise(boxes1, boxes2):
- area1 = box_area(boxes1)
- area2 = box_area(boxes2)
-
- lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2]
- rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2]
-
- wh = (rb - lt).clamp(min=0) # [N,2]
- inter = wh[:, 0] * wh[:, 1] # [N]
-
- union = area1 + area2 - inter
-
- iou = inter / union
- return iou, union
-
-
-def generalized_box_iou_pairwise(boxes1, boxes2):
- """
- Generalized IoU from https://giou.stanford.edu/
-
- Input:
- - boxes1, boxes2: N,4
- Output:
-    - giou: N
- """
- # degenerate boxes gives inf / nan results
- # so do an early check
- assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
- assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
- assert boxes1.shape == boxes2.shape
- iou, union = box_iou_pairwise(boxes1, boxes2) # N, 4
-
- lt = torch.min(boxes1[:, :2], boxes2[:, :2])
- rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])
-
- wh = (rb - lt).clamp(min=0) # [N,2]
- area = wh[:, 0] * wh[:, 1]
-
- return iou - (area - union) / area
-
-
-def masks_to_boxes(masks):
- """Compute the bounding boxes around the provided masks
-
- The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
-
- Returns a [N, 4] tensors, with the boxes in xyxy format
- """
- if masks.numel() == 0:
- return torch.zeros((0, 4), device=masks.device)
-
- h, w = masks.shape[-2:]
-
- y = torch.arange(0, h, dtype=torch.float)
- x = torch.arange(0, w, dtype=torch.float)
- y, x = torch.meshgrid(y, x)
-
- x_mask = masks * x.unsqueeze(0)
- x_max = x_mask.flatten(1).max(-1)[0]
- x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
- y_mask = masks * y.unsqueeze(0)
- y_max = y_mask.flatten(1).max(-1)[0]
- y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
- return torch.stack([x_min, y_min, x_max, y_max], 1)
-
-
-if __name__ == "__main__":
- x = torch.rand(5, 4)
- y = torch.rand(3, 4)
- iou, union = box_iou(x, y)
- import ipdb
-
- ipdb.set_trace()
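A small, hypothetical usage sketch of the box utilities above (boxes are `[x0, y0, x1, y1]` tensors); it assumes the functions are imported from this module:

```python
import torch

# Two candidate boxes: the first overlaps boxes_a with IoU 1/7, the second does not overlap.
boxes_a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
boxes_b = torch.tensor([[1.0, 1.0, 3.0, 3.0], [10.0, 10.0, 12.0, 12.0]])

iou, union = box_iou(boxes_a, boxes_b)        # iou ~ [[0.1429, 0.0]], union = [[7.0, 8.0]]
giou = generalized_box_iou(boxes_a, boxes_b)  # [1, 2] pairwise GIoU matrix, values in (-1, 1]
```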
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vc1dsp.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vc1dsp.h
deleted file mode 100644
index cd01ac5384b10e9ffa17db43774b68f6ab037fab..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vc1dsp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_ARM_VC1DSP_H
-#define AVCODEC_ARM_VC1DSP_H
-
-#include "libavcodec/vc1dsp.h"
-
-void ff_vc1dsp_init_neon(VC1DSPContext *dsp);
-
-#endif /* AVCODEC_ARM_VC1DSP_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac9dec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac9dec.c
deleted file mode 100644
index 60962b1676207095ff4ba3330185a22d2c7309a1..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/atrac9dec.c
+++ /dev/null
@@ -1,1007 +0,0 @@
-/*
- * ATRAC9 decoder
- * Copyright (c) 2018 Rostislav Pehlivanov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/thread.h"
-
-#include "codec_internal.h"
-#include "decode.h"
-#include "get_bits.h"
-#include "atrac9tab.h"
-#include "libavutil/tx.h"
-#include "libavutil/lfg.h"
-#include "libavutil/float_dsp.h"
-#include "libavutil/mem_internal.h"
-
-#define ATRAC9_SF_VLC_BITS 8
-#define ATRAC9_COEFF_VLC_BITS 9
-
-typedef struct ATRAC9ChannelData {
- int band_ext;
- int q_unit_cnt;
- int band_ext_data[4];
- int32_t scalefactors[31];
- int32_t scalefactors_prev[31];
-
- int precision_coarse[30];
- int precision_fine[30];
- int precision_mask[30];
-
- int codebookset[30];
-
- int32_t q_coeffs_coarse[256];
- int32_t q_coeffs_fine[256];
-
- DECLARE_ALIGNED(32, float, coeffs )[256];
- DECLARE_ALIGNED(32, float, prev_win)[128];
-} ATRAC9ChannelData;
-
-typedef struct ATRAC9BlockData {
- ATRAC9ChannelData channel[2];
-
- /* Base */
- int band_count;
- int q_unit_cnt;
- int q_unit_cnt_prev;
-
- /* Stereo block only */
- int stereo_q_unit;
-
- /* Band extension only */
- int has_band_ext;
- int has_band_ext_data;
- int band_ext_q_unit;
-
- /* Gradient */
- int grad_mode;
- int grad_boundary;
- int gradient[31];
-
- /* Stereo */
- int cpe_base_channel;
- int is_signs[30];
-
- int reuseable;
-
-} ATRAC9BlockData;
-
-typedef struct ATRAC9Context {
- AVCodecContext *avctx;
- AVFloatDSPContext *fdsp;
- AVTXContext *tx;
- av_tx_fn tx_fn;
- ATRAC9BlockData block[5];
- AVLFG lfg;
-
- /* Set on init */
- int frame_log2;
- int avg_frame_size;
- int frame_count;
- int samplerate_idx;
- const ATRAC9BlockConfig *block_config;
-
- /* Generated on init */
- uint8_t alloc_curve[48][48];
- DECLARE_ALIGNED(32, float, imdct_win)[256];
-
- DECLARE_ALIGNED(32, float, temp)[2048];
-} ATRAC9Context;
-
-static VLC sf_vlc[2][8]; /* Signed/unsigned, length */
-static VLC coeff_vlc[2][8][4]; /* Cookbook, precision, cookbook index */
-
-static inline int parse_gradient(ATRAC9Context *s, ATRAC9BlockData *b,
- GetBitContext *gb)
-{
- int grad_range[2];
- int grad_value[2];
- int values, sign, base;
- uint8_t *curve;
- float scale;
-
- b->grad_mode = get_bits(gb, 2);
- if (b->grad_mode) {
- grad_range[0] = get_bits(gb, 5);
- grad_range[1] = 31;
- grad_value[0] = get_bits(gb, 5);
- grad_value[1] = 31;
- } else {
- grad_range[0] = get_bits(gb, 6);
- grad_range[1] = get_bits(gb, 6) + 1;
- grad_value[0] = get_bits(gb, 5);
- grad_value[1] = get_bits(gb, 5);
- }
- b->grad_boundary = get_bits(gb, 4);
-
- if (grad_range[0] >= grad_range[1] || grad_range[1] > 31)
- return AVERROR_INVALIDDATA;
-
- if (b->grad_boundary > b->q_unit_cnt)
- return AVERROR_INVALIDDATA;
-
- values = grad_value[1] - grad_value[0];
- sign = 1 - 2*(values < 0);
- base = grad_value[0] + sign;
- scale = (FFABS(values) - 1) / 31.0f;
- curve = s->alloc_curve[grad_range[1] - grad_range[0] - 1];
-
- for (int i = 0; i <= b->q_unit_cnt; i++)
- b->gradient[i] = grad_value[i >= grad_range[0]];
-
- for (int i = grad_range[0]; i < grad_range[1]; i++)
- b->gradient[i] = base + sign*((int)(scale*curve[i - grad_range[0]]));
-
- return 0;
-}
-
-static inline void calc_precision(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c)
-{
- memset(c->precision_mask, 0, sizeof(c->precision_mask));
- for (int i = 1; i < b->q_unit_cnt; i++) {
- const int delta = FFABS(c->scalefactors[i] - c->scalefactors[i - 1]) - 1;
- if (delta > 0) {
- const int neg = c->scalefactors[i - 1] > c->scalefactors[i];
- c->precision_mask[i - neg] += FFMIN(delta, 5);
- }
- }
-
- if (b->grad_mode) {
- for (int i = 0; i < b->q_unit_cnt; i++) {
- c->precision_coarse[i] = c->scalefactors[i];
- c->precision_coarse[i] += c->precision_mask[i] - b->gradient[i];
- if (c->precision_coarse[i] < 0)
- continue;
- switch (b->grad_mode) {
- case 1:
- c->precision_coarse[i] >>= 1;
- break;
- case 2:
- c->precision_coarse[i] = (3 * c->precision_coarse[i]) >> 3;
- break;
- case 3:
- c->precision_coarse[i] >>= 2;
- break;
- }
- }
- } else {
- for (int i = 0; i < b->q_unit_cnt; i++)
- c->precision_coarse[i] = c->scalefactors[i] - b->gradient[i];
- }
-
-
- for (int i = 0; i < b->q_unit_cnt; i++)
- c->precision_coarse[i] = FFMAX(c->precision_coarse[i], 1);
-
- for (int i = 0; i < b->grad_boundary; i++)
- c->precision_coarse[i]++;
-
- for (int i = 0; i < b->q_unit_cnt; i++) {
- c->precision_fine[i] = 0;
- if (c->precision_coarse[i] > 15) {
- c->precision_fine[i] = FFMIN(c->precision_coarse[i], 30) - 15;
- c->precision_coarse[i] = 15;
- }
- }
-}
-
-static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b,
- GetBitContext *gb, int stereo)
-{
- int ext_band = 0;
-
- if (b->has_band_ext) {
- if (b->q_unit_cnt < 13 || b->q_unit_cnt > 20)
- return AVERROR_INVALIDDATA;
- ext_band = at9_tab_band_ext_group[b->q_unit_cnt - 13][2];
- if (stereo) {
- b->channel[1].band_ext = get_bits(gb, 2);
- b->channel[1].band_ext = ext_band > 2 ? b->channel[1].band_ext : 4;
- } else {
- skip_bits1(gb);
- }
- }
-
- b->has_band_ext_data = get_bits1(gb);
- if (!b->has_band_ext_data)
- return 0;
-
- if (!b->has_band_ext) {
- skip_bits(gb, 2);
- skip_bits_long(gb, get_bits(gb, 5));
- return 0;
- }
-
- b->channel[0].band_ext = get_bits(gb, 2);
- b->channel[0].band_ext = ext_band > 2 ? b->channel[0].band_ext : 4;
-
- if (!get_bits(gb, 5)) {
- for (int i = 0; i <= stereo; i++) {
- ATRAC9ChannelData *c = &b->channel[i];
- const int count = at9_tab_band_ext_cnt[c->band_ext][ext_band];
- for (int j = 0; j < count; j++) {
- int len = at9_tab_band_ext_lengths[c->band_ext][ext_band][j];
- c->band_ext_data[j] = av_clip_uintp2_c(c->band_ext_data[j], len);
- }
- }
-
- return 0;
- }
-
- for (int i = 0; i <= stereo; i++) {
- ATRAC9ChannelData *c = &b->channel[i];
- const int count = at9_tab_band_ext_cnt[c->band_ext][ext_band];
- for (int j = 0; j < count; j++) {
- int len = at9_tab_band_ext_lengths[c->band_ext][ext_band][j];
- c->band_ext_data[j] = get_bits(gb, len);
- }
- }
-
- return 0;
-}
-
-static inline int read_scalefactors(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c, GetBitContext *gb,
- int channel_idx, int first_in_pkt)
-{
- static const uint8_t mode_map[2][4] = { { 0, 1, 2, 3 }, { 0, 2, 3, 4 } };
- const int mode = mode_map[channel_idx][get_bits(gb, 2)];
-
- memset(c->scalefactors, 0, sizeof(c->scalefactors));
-
- if (first_in_pkt && (mode == 4 || ((mode == 3) && !channel_idx))) {
- av_log(s->avctx, AV_LOG_ERROR, "Invalid scalefactor coding mode!\n");
- return AVERROR_INVALIDDATA;
- }
-
- switch (mode) {
- case 0: { /* VLC delta offset */
- const uint8_t *sf_weights = at9_tab_sf_weights[get_bits(gb, 3)];
- const int base = get_bits(gb, 5);
- const int len = get_bits(gb, 2) + 3;
- const VLC *tab = &sf_vlc[0][len];
-
- c->scalefactors[0] = get_bits(gb, len);
-
- for (int i = 1; i < b->band_ext_q_unit; i++) {
- int val = c->scalefactors[i - 1] + get_vlc2(gb, tab->table,
- ATRAC9_SF_VLC_BITS, 1);
- c->scalefactors[i] = val & ((1 << len) - 1);
- }
-
- for (int i = 0; i < b->band_ext_q_unit; i++)
- c->scalefactors[i] += base - sf_weights[i];
-
- break;
- }
- case 1: { /* CLC offset */
- const int len = get_bits(gb, 2) + 2;
- const int base = len < 5 ? get_bits(gb, 5) : 0;
- for (int i = 0; i < b->band_ext_q_unit; i++)
- c->scalefactors[i] = base + get_bits(gb, len);
- break;
- }
- case 2:
- case 4: { /* VLC dist to baseline */
- const int *baseline = mode == 4 ? c->scalefactors_prev :
- channel_idx ? b->channel[0].scalefactors :
- c->scalefactors_prev;
- const int baseline_len = mode == 4 ? b->q_unit_cnt_prev :
- channel_idx ? b->band_ext_q_unit :
- b->q_unit_cnt_prev;
-
- const int len = get_bits(gb, 2) + 2;
- const int unit_cnt = FFMIN(b->band_ext_q_unit, baseline_len);
- const VLC *tab = &sf_vlc[1][len];
-
- for (int i = 0; i < unit_cnt; i++) {
- int dist = get_vlc2(gb, tab->table, ATRAC9_SF_VLC_BITS, 1);
- c->scalefactors[i] = baseline[i] + dist;
- }
-
- for (int i = unit_cnt; i < b->band_ext_q_unit; i++)
- c->scalefactors[i] = get_bits(gb, 5);
-
- break;
- }
- case 3: { /* VLC offset with baseline */
- const int *baseline = channel_idx ? b->channel[0].scalefactors :
- c->scalefactors_prev;
- const int baseline_len = channel_idx ? b->band_ext_q_unit :
- b->q_unit_cnt_prev;
-
- const int base = get_bits(gb, 5) - (1 << (5 - 1));
- const int len = get_bits(gb, 2) + 1;
- const int unit_cnt = FFMIN(b->band_ext_q_unit, baseline_len);
- const VLC *tab = &sf_vlc[0][len];
-
- c->scalefactors[0] = get_bits(gb, len);
-
- for (int i = 1; i < unit_cnt; i++) {
- int val = c->scalefactors[i - 1] + get_vlc2(gb, tab->table,
- ATRAC9_SF_VLC_BITS, 1);
- c->scalefactors[i] = val & ((1 << len) - 1);
- }
-
- for (int i = 0; i < unit_cnt; i++)
- c->scalefactors[i] += base + baseline[i];
-
- for (int i = unit_cnt; i < b->band_ext_q_unit; i++)
- c->scalefactors[i] = get_bits(gb, 5);
- break;
- }
- }
-
- for (int i = 0; i < b->band_ext_q_unit; i++)
- if (c->scalefactors[i] < 0 || c->scalefactors[i] > 31)
- return AVERROR_INVALIDDATA;
-
- memcpy(c->scalefactors_prev, c->scalefactors, sizeof(c->scalefactors));
-
- return 0;
-}
-
-static inline void calc_codebook_idx(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c)
-{
- int avg = 0;
- const int last_sf = c->scalefactors[c->q_unit_cnt];
-
- memset(c->codebookset, 0, sizeof(c->codebookset));
-
- if (c->q_unit_cnt <= 1)
- return;
- if (s->samplerate_idx > 7)
- return;
-
- c->scalefactors[c->q_unit_cnt] = c->scalefactors[c->q_unit_cnt - 1];
-
- if (c->q_unit_cnt > 12) {
- for (int i = 0; i < 12; i++)
- avg += c->scalefactors[i];
- avg = (avg + 6) / 12;
- }
-
- for (int i = 8; i < c->q_unit_cnt; i++) {
- const int prev = c->scalefactors[i - 1];
- const int cur = c->scalefactors[i ];
- const int next = c->scalefactors[i + 1];
- const int min = FFMIN(prev, next);
- if ((cur - min >= 3 || 2*cur - prev - next >= 3))
- c->codebookset[i] = 1;
- }
-
-
- for (int i = 12; i < c->q_unit_cnt; i++) {
- const int cur = c->scalefactors[i];
- const int cnd = at9_q_unit_to_coeff_cnt[i] == 16;
- const int min = FFMIN(c->scalefactors[i + 1], c->scalefactors[i - 1]);
- if (c->codebookset[i])
- continue;
-
- c->codebookset[i] = (((cur - min) >= 2) && (cur >= (avg - cnd)));
- }
-
- c->scalefactors[c->q_unit_cnt] = last_sf;
-}
-
-static inline void read_coeffs_coarse(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c, GetBitContext *gb)
-{
- const int max_prec = s->samplerate_idx > 7 ? 1 : 7;
-
- memset(c->q_coeffs_coarse, 0, sizeof(c->q_coeffs_coarse));
-
- for (int i = 0; i < c->q_unit_cnt; i++) {
- int *coeffs = &c->q_coeffs_coarse[at9_q_unit_to_coeff_idx[i]];
- const int bands = at9_q_unit_to_coeff_cnt[i];
- const int prec = c->precision_coarse[i] + 1;
-
- if (prec <= max_prec) {
- const int cb = c->codebookset[i];
- const int cbi = at9_q_unit_to_codebookidx[i];
- const VLC *tab = &coeff_vlc[cb][prec][cbi];
- const HuffmanCodebook *huff = &at9_huffman_coeffs[cb][prec][cbi];
- const int groups = bands >> huff->value_cnt_pow;
-
- for (int j = 0; j < groups; j++) {
- uint16_t val = get_vlc2(gb, tab->table, ATRAC9_COEFF_VLC_BITS, 2);
-
- for (int k = 0; k < huff->value_cnt; k++) {
- coeffs[k] = sign_extend(val, huff->value_bits);
- val >>= huff->value_bits;
- }
-
- coeffs += huff->value_cnt;
- }
- } else {
- for (int j = 0; j < bands; j++)
- coeffs[j] = sign_extend(get_bits(gb, prec), prec);
- }
- }
-}
-
-static inline void read_coeffs_fine(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c, GetBitContext *gb)
-{
- memset(c->q_coeffs_fine, 0, sizeof(c->q_coeffs_fine));
-
- for (int i = 0; i < c->q_unit_cnt; i++) {
- const int start = at9_q_unit_to_coeff_idx[i + 0];
- const int end = at9_q_unit_to_coeff_idx[i + 1];
- const int len = c->precision_fine[i] + 1;
-
- if (c->precision_fine[i] <= 0)
- continue;
-
- for (int j = start; j < end; j++)
- c->q_coeffs_fine[j] = sign_extend(get_bits(gb, len), len);
- }
-}
-
-static inline void dequantize(ATRAC9Context *s, ATRAC9BlockData *b,
- ATRAC9ChannelData *c)
-{
- memset(c->coeffs, 0, sizeof(c->coeffs));
-
- for (int i = 0; i < c->q_unit_cnt; i++) {
- const int start = at9_q_unit_to_coeff_idx[i + 0];
- const int end = at9_q_unit_to_coeff_idx[i + 1];
-
- const float coarse_c = at9_quant_step_coarse[c->precision_coarse[i]];
- const float fine_c = at9_quant_step_fine[c->precision_fine[i]];
-
- for (int j = start; j < end; j++) {
- const float vc = c->q_coeffs_coarse[j] * coarse_c;
- const float vf = c->q_coeffs_fine[j] * fine_c;
- c->coeffs[j] = vc + vf;
- }
- }
-}
-
-static inline void apply_intensity_stereo(ATRAC9Context *s, ATRAC9BlockData *b,
- const int stereo)
-{
- float *src = b->channel[ b->cpe_base_channel].coeffs;
- float *dst = b->channel[!b->cpe_base_channel].coeffs;
-
- if (!stereo)
- return;
-
- if (b->q_unit_cnt <= b->stereo_q_unit)
- return;
-
- for (int i = b->stereo_q_unit; i < b->q_unit_cnt; i++) {
- const int sign = b->is_signs[i];
- const int start = at9_q_unit_to_coeff_idx[i + 0];
- const int end = at9_q_unit_to_coeff_idx[i + 1];
- for (int j = start; j < end; j++)
- dst[j] = sign*src[j];
- }
-}
-
-static inline void apply_scalefactors(ATRAC9Context *s, ATRAC9BlockData *b,
- const int stereo)
-{
- for (int i = 0; i <= stereo; i++) {
- float *coeffs = b->channel[i].coeffs;
- for (int j = 0; j < b->q_unit_cnt; j++) {
- const int start = at9_q_unit_to_coeff_idx[j + 0];
- const int end = at9_q_unit_to_coeff_idx[j + 1];
- const int scalefactor = b->channel[i].scalefactors[j];
- const float scale = at9_scalefactor_c[scalefactor];
- for (int k = start; k < end; k++)
- coeffs[k] *= scale;
- }
- }
-}
-
-static inline void fill_with_noise(ATRAC9Context *s, ATRAC9ChannelData *c,
- int start, int count)
-{
- float maxval = 0.0f;
- for (int i = 0; i < count; i += 2) {
- double tmp[2];
- av_bmg_get(&s->lfg, tmp);
- c->coeffs[start + i + 0] = tmp[0];
- c->coeffs[start + i + 1] = tmp[1];
- maxval = FFMAX(FFMAX(FFABS(tmp[0]), FFABS(tmp[1])), maxval);
- }
- /* Normalize */
- for (int i = 0; i < count; i++)
- c->coeffs[start + i] /= maxval;
-}
-
-static inline void scale_band_ext_coeffs(ATRAC9ChannelData *c, float sf[6],
- const int s_unit, const int e_unit)
-{
- for (int i = s_unit; i < e_unit; i++) {
- const int start = at9_q_unit_to_coeff_idx[i + 0];
- const int end = at9_q_unit_to_coeff_idx[i + 1];
- for (int j = start; j < end; j++)
- c->coeffs[j] *= sf[i - s_unit];
- }
-}
-
-static inline void apply_band_extension(ATRAC9Context *s, ATRAC9BlockData *b,
- const int stereo)
-{
- const int g_units[4] = { /* A, B, C, total units */
- b->q_unit_cnt,
- at9_tab_band_ext_group[b->q_unit_cnt - 13][0],
- at9_tab_band_ext_group[b->q_unit_cnt - 13][1],
- FFMAX(g_units[2], 22),
- };
-
- const int g_bins[4] = { /* A, B, C, total bins */
- at9_q_unit_to_coeff_idx[g_units[0]],
- at9_q_unit_to_coeff_idx[g_units[1]],
- at9_q_unit_to_coeff_idx[g_units[2]],
- at9_q_unit_to_coeff_idx[g_units[3]],
- };
-
- for (int ch = 0; ch <= stereo; ch++) {
- ATRAC9ChannelData *c = &b->channel[ch];
-
- /* Mirror the spectrum */
- for (int i = 0; i < 3; i++)
- for (int j = 0; j < (g_bins[i + 1] - g_bins[i + 0]); j++)
- c->coeffs[g_bins[i] + j] = c->coeffs[g_bins[i] - j - 1];
-
- switch (c->band_ext) {
- case 0: {
- float sf[6] = { 0.0f };
- const int l = g_units[3] - g_units[0] - 1;
- const int n_start = at9_q_unit_to_coeff_idx[g_units[3] - 1];
- const int n_cnt = at9_q_unit_to_coeff_cnt[g_units[3] - 1];
- switch (at9_tab_band_ext_group[b->q_unit_cnt - 13][2]) {
- case 3:
- sf[0] = at9_band_ext_scales_m0[0][0][c->band_ext_data[0]];
- sf[1] = at9_band_ext_scales_m0[0][1][c->band_ext_data[0]];
- sf[2] = at9_band_ext_scales_m0[0][2][c->band_ext_data[1]];
- sf[3] = at9_band_ext_scales_m0[0][3][c->band_ext_data[2]];
- sf[4] = at9_band_ext_scales_m0[0][4][c->band_ext_data[3]];
- break;
- case 4:
- sf[0] = at9_band_ext_scales_m0[1][0][c->band_ext_data[0]];
- sf[1] = at9_band_ext_scales_m0[1][1][c->band_ext_data[0]];
- sf[2] = at9_band_ext_scales_m0[1][2][c->band_ext_data[1]];
- sf[3] = at9_band_ext_scales_m0[1][3][c->band_ext_data[2]];
- sf[4] = at9_band_ext_scales_m0[1][4][c->band_ext_data[3]];
- break;
- case 5:
- sf[0] = at9_band_ext_scales_m0[2][0][c->band_ext_data[0]];
- sf[1] = at9_band_ext_scales_m0[2][1][c->band_ext_data[1]];
- sf[2] = at9_band_ext_scales_m0[2][2][c->band_ext_data[1]];
- break;
- }
-
- sf[l] = at9_scalefactor_c[c->scalefactors[g_units[0]]];
-
- fill_with_noise(s, c, n_start, n_cnt);
- scale_band_ext_coeffs(c, sf, g_units[0], g_units[3]);
- break;
- }
- case 1: {
- float sf[6];
- for (int i = g_units[0]; i < g_units[3]; i++)
- sf[i - g_units[0]] = at9_scalefactor_c[c->scalefactors[i]];
-
- fill_with_noise(s, c, g_bins[0], g_bins[3] - g_bins[0]);
- scale_band_ext_coeffs(c, sf, g_units[0], g_units[3]);
- break;
- }
- case 2: {
- const float g_sf[2] = {
- at9_band_ext_scales_m2[c->band_ext_data[0]],
- at9_band_ext_scales_m2[c->band_ext_data[1]],
- };
-
- for (int i = 0; i < 2; i++)
- for (int j = g_bins[i + 0]; j < g_bins[i + 1]; j++)
- c->coeffs[j] *= g_sf[i];
- break;
- }
- case 3: {
- float scale = at9_band_ext_scales_m3[c->band_ext_data[0]][0];
- float rate = at9_band_ext_scales_m3[c->band_ext_data[1]][1];
- rate = pow(2, rate);
- for (int i = g_bins[0]; i < g_bins[3]; i++) {
- scale *= rate;
- c->coeffs[i] *= scale;
- }
- break;
- }
- case 4: {
- const float m = at9_band_ext_scales_m4[c->band_ext_data[0]];
- const float g_sf[3] = { 0.7079468f*m, 0.5011902f*m, 0.3548279f*m };
-
- for (int i = 0; i < 3; i++)
- for (int j = g_bins[i + 0]; j < g_bins[i + 1]; j++)
- c->coeffs[j] *= g_sf[i];
- break;
- }
- }
- }
-}
-
-static int atrac9_decode_block(ATRAC9Context *s, GetBitContext *gb,
- ATRAC9BlockData *b, AVFrame *frame,
- int frame_idx, int block_idx)
-{
- const int first_in_pkt = !get_bits1(gb);
- const int reuse_params = get_bits1(gb);
- const int stereo = s->block_config->type[block_idx] == ATRAC9_BLOCK_TYPE_CPE;
-
- if (s->block_config->type[block_idx] == ATRAC9_BLOCK_TYPE_LFE) {
- ATRAC9ChannelData *c = &b->channel[0];
- const int precision = reuse_params ? 8 : 4;
- c->q_unit_cnt = b->q_unit_cnt = 2;
-
- memset(c->scalefactors, 0, sizeof(c->scalefactors));
- memset(c->q_coeffs_fine, 0, sizeof(c->q_coeffs_fine));
- memset(c->q_coeffs_coarse, 0, sizeof(c->q_coeffs_coarse));
-
- for (int i = 0; i < b->q_unit_cnt; i++) {
- c->scalefactors[i] = get_bits(gb, 5);
- c->precision_coarse[i] = precision;
- c->precision_fine[i] = 0;
- }
-
- for (int i = 0; i < c->q_unit_cnt; i++) {
- const int start = at9_q_unit_to_coeff_idx[i + 0];
- const int end = at9_q_unit_to_coeff_idx[i + 1];
- for (int j = start; j < end; j++)
- c->q_coeffs_coarse[j] = get_bits(gb, c->precision_coarse[i] + 1);
- }
-
- dequantize (s, b, c);
- apply_scalefactors(s, b, 0);
-
- goto imdct;
- }
-
- if (first_in_pkt && reuse_params) {
- av_log(s->avctx, AV_LOG_ERROR, "Invalid block flags!\n");
- return AVERROR_INVALIDDATA;
- }
-
- /* Band parameters */
- if (!reuse_params) {
- int stereo_band, ext_band;
- const int min_band_count = s->samplerate_idx > 7 ? 1 : 3;
- b->reuseable = 0;
- b->band_count = get_bits(gb, 4) + min_band_count;
- b->q_unit_cnt = at9_tab_band_q_unit_map[b->band_count];
-
- b->band_ext_q_unit = b->stereo_q_unit = b->q_unit_cnt;
-
- if (b->band_count > at9_tab_sri_max_bands[s->samplerate_idx]) {
- av_log(s->avctx, AV_LOG_ERROR, "Invalid band count %i!\n",
- b->band_count);
- return AVERROR_INVALIDDATA;
- }
-
- if (stereo) {
- stereo_band = get_bits(gb, 4) + min_band_count;
- if (stereo_band > b->band_count) {
- av_log(s->avctx, AV_LOG_ERROR, "Invalid stereo band %i!\n",
- stereo_band);
- return AVERROR_INVALIDDATA;
- }
- b->stereo_q_unit = at9_tab_band_q_unit_map[stereo_band];
- }
-
- b->has_band_ext = get_bits1(gb);
- if (b->has_band_ext) {
- ext_band = get_bits(gb, 4) + min_band_count;
- if (ext_band < b->band_count) {
- av_log(s->avctx, AV_LOG_ERROR, "Invalid extension band %i!\n",
- ext_band);
- return AVERROR_INVALIDDATA;
- }
- b->band_ext_q_unit = at9_tab_band_q_unit_map[ext_band];
- }
- b->reuseable = 1;
- }
- if (!b->reuseable) {
- av_log(s->avctx, AV_LOG_ERROR, "invalid block reused!\n");
- return AVERROR_INVALIDDATA;
- }
-
- /* Calculate bit alloc gradient */
- if (parse_gradient(s, b, gb))
- return AVERROR_INVALIDDATA;
-
- /* IS data */
- b->cpe_base_channel = 0;
- if (stereo) {
- b->cpe_base_channel = get_bits1(gb);
- if (get_bits1(gb)) {
- for (int i = b->stereo_q_unit; i < b->q_unit_cnt; i++)
- b->is_signs[i] = 1 - 2*get_bits1(gb);
- } else {
- for (int i = 0; i < FF_ARRAY_ELEMS(b->is_signs); i++)
- b->is_signs[i] = 1;
- }
- }
-
- /* Band extension */
- if (parse_band_ext(s, b, gb, stereo))
- return AVERROR_INVALIDDATA;
-
- /* Scalefactors */
- for (int i = 0; i <= stereo; i++) {
- ATRAC9ChannelData *c = &b->channel[i];
- c->q_unit_cnt = i == b->cpe_base_channel ? b->q_unit_cnt :
- b->stereo_q_unit;
- if (read_scalefactors(s, b, c, gb, i, first_in_pkt))
- return AVERROR_INVALIDDATA;
-
- calc_precision (s, b, c);
- calc_codebook_idx (s, b, c);
- read_coeffs_coarse(s, b, c, gb);
- read_coeffs_fine (s, b, c, gb);
- dequantize (s, b, c);
- }
-
- b->q_unit_cnt_prev = b->has_band_ext ? b->band_ext_q_unit : b->q_unit_cnt;
-
- apply_intensity_stereo(s, b, stereo);
- apply_scalefactors (s, b, stereo);
-
- if (b->has_band_ext && b->has_band_ext_data)
- apply_band_extension (s, b, stereo);
-
-imdct:
- for (int i = 0; i <= stereo; i++) {
- ATRAC9ChannelData *c = &b->channel[i];
- const int dst_idx = s->block_config->plane_map[block_idx][i];
- const int wsize = 1 << s->frame_log2;
- const ptrdiff_t offset = wsize*frame_idx*sizeof(float);
- float *dst = (float *)(frame->extended_data[dst_idx] + offset);
-
- s->tx_fn(s->tx, s->temp, c->coeffs, sizeof(float));
- s->fdsp->vector_fmul_window(dst, c->prev_win, s->temp,
- s->imdct_win, wsize >> 1);
- memcpy(c->prev_win, s->temp + (wsize >> 1), sizeof(float)*wsize >> 1);
- }
-
- return 0;
-}
-
-static int atrac9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame_ptr, AVPacket *avpkt)
-{
- int ret;
- GetBitContext gb;
- ATRAC9Context *s = avctx->priv_data;
- const int frames = FFMIN(avpkt->size / s->avg_frame_size, s->frame_count);
-
- frame->nb_samples = (1 << s->frame_log2) * frames;
- ret = ff_get_buffer(avctx, frame, 0);
- if (ret < 0)
- return ret;
-
- init_get_bits8(&gb, avpkt->data, avpkt->size);
-
- for (int i = 0; i < frames; i++) {
- for (int j = 0; j < s->block_config->count; j++) {
- ret = atrac9_decode_block(s, &gb, &s->block[j], frame, i, j);
- if (ret)
- return ret;
- align_get_bits(&gb);
- }
- }
-
- *got_frame_ptr = 1;
-
- return avctx->block_align;
-}
-
-static void atrac9_decode_flush(AVCodecContext *avctx)
-{
- ATRAC9Context *s = avctx->priv_data;
-
- for (int j = 0; j < s->block_config->count; j++) {
- ATRAC9BlockData *b = &s->block[j];
- const int stereo = s->block_config->type[j] == ATRAC9_BLOCK_TYPE_CPE;
- for (int i = 0; i <= stereo; i++) {
- ATRAC9ChannelData *c = &b->channel[i];
- memset(c->prev_win, 0, sizeof(c->prev_win));
- }
- }
-}
-
-static av_cold int atrac9_decode_close(AVCodecContext *avctx)
-{
- ATRAC9Context *s = avctx->priv_data;
-
- av_tx_uninit(&s->tx);
- av_freep(&s->fdsp);
-
- return 0;
-}
-
-static av_cold void atrac9_init_vlc(VLC *vlc, int nb_bits, int nb_codes,
- const uint8_t (**tab)[2],
- unsigned *buf_offset, int offset)
-{
- static VLCElem vlc_buf[24812];
-
- vlc->table = &vlc_buf[*buf_offset];
- vlc->table_allocated = FF_ARRAY_ELEMS(vlc_buf) - *buf_offset;
- ff_init_vlc_from_lengths(vlc, nb_bits, nb_codes,
- &(*tab)[0][1], 2, &(*tab)[0][0], 2, 1,
- offset, INIT_VLC_STATIC_OVERLONG, NULL);
- *buf_offset += vlc->table_size;
- *tab += nb_codes;
-}
-
-static av_cold void atrac9_init_static(void)
-{
- const uint8_t (*tab)[2];
- unsigned offset = 0;
-
- /* Unsigned scalefactor VLCs */
- tab = at9_sfb_a_tab;
- for (int i = 1; i < 7; i++) {
- const HuffmanCodebook *hf = &at9_huffman_sf_unsigned[i];
-
- atrac9_init_vlc(&sf_vlc[0][i], ATRAC9_SF_VLC_BITS,
- hf->size, &tab, &offset, 0);
- }
-
- /* Signed scalefactor VLCs */
- tab = at9_sfb_b_tab;
- for (int i = 2; i < 6; i++) {
- const HuffmanCodebook *hf = &at9_huffman_sf_signed[i];
-
- /* The symbols are signed integers in the range -16..15;
- * the values in the source table are offset by 16 to make
- * them fit into an uint8_t; the -16 reverses this shift. */
- atrac9_init_vlc(&sf_vlc[1][i], ATRAC9_SF_VLC_BITS,
- hf->size, &tab, &offset, -16);
- }
-
- /* Coefficient VLCs */
- tab = at9_coeffs_tab;
- for (int i = 0; i < 2; i++) {
- for (int j = 2; j < 8; j++) {
- for (int k = i; k < 4; k++) {
- const HuffmanCodebook *hf = &at9_huffman_coeffs[i][j][k];
- atrac9_init_vlc(&coeff_vlc[i][j][k], ATRAC9_COEFF_VLC_BITS,
- hf->size, &tab, &offset, 0);
- }
- }
- }
-}
-
-static av_cold int atrac9_decode_init(AVCodecContext *avctx)
-{
- float scale;
- static AVOnce static_table_init = AV_ONCE_INIT;
- GetBitContext gb;
- ATRAC9Context *s = avctx->priv_data;
- int err, version, block_config_idx, superframe_idx, alloc_c_len;
-
- s->avctx = avctx;
-
- av_lfg_init(&s->lfg, 0xFBADF00D);
-
- if (avctx->block_align <= 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid block align\n");
- return AVERROR_INVALIDDATA;
- }
-
- if (avctx->extradata_size != 12) {
- av_log(avctx, AV_LOG_ERROR, "Invalid extradata length!\n");
- return AVERROR_INVALIDDATA;
- }
-
- version = AV_RL32(avctx->extradata);
- if (version > 2) {
- av_log(avctx, AV_LOG_ERROR, "Unsupported version (%i)!\n", version);
- return AVERROR_INVALIDDATA;
- }
-
- init_get_bits8(&gb, avctx->extradata + 4, avctx->extradata_size);
-
- if (get_bits(&gb, 8) != 0xFE) {
- av_log(avctx, AV_LOG_ERROR, "Incorrect magic byte!\n");
- return AVERROR_INVALIDDATA;
- }
-
- s->samplerate_idx = get_bits(&gb, 4);
- avctx->sample_rate = at9_tab_samplerates[s->samplerate_idx];
-
- block_config_idx = get_bits(&gb, 3);
- if (block_config_idx > 5) {
- av_log(avctx, AV_LOG_ERROR, "Incorrect block config!\n");
- return AVERROR_INVALIDDATA;
- }
- s->block_config = &at9_block_layout[block_config_idx];
-
- av_channel_layout_uninit(&avctx->ch_layout);
- avctx->ch_layout = s->block_config->channel_layout;
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
-
- if (get_bits1(&gb)) {
- av_log(avctx, AV_LOG_ERROR, "Incorrect verification bit!\n");
- return AVERROR_INVALIDDATA;
- }
-
- /* Average frame size in bytes */
- s->avg_frame_size = get_bits(&gb, 11) + 1;
-
- superframe_idx = get_bits(&gb, 2);
- if (superframe_idx & 1) {
- av_log(avctx, AV_LOG_ERROR, "Invalid superframe index!\n");
- return AVERROR_INVALIDDATA;
- }
-
- s->frame_count = 1 << superframe_idx;
- s->frame_log2 = at9_tab_sri_frame_log2[s->samplerate_idx];
-
- scale = 1.0f / 32768.0;
- err = av_tx_init(&s->tx, &s->tx_fn, AV_TX_FLOAT_MDCT, 1,
- 1 << s->frame_log2, &scale, 0);
- if (err < 0)
- return err;
-
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
- if (!s->fdsp)
- return AVERROR(ENOMEM);
-
- /* iMDCT window */
- for (int i = 0; i < (1 << s->frame_log2); i++) {
- const int len = 1 << s->frame_log2;
- const float sidx = ( i + 0.5f) / len;
- const float eidx = (len - i - 0.5f) / len;
- const float s_c = sinf(sidx*M_PI - M_PI_2)*0.5f + 0.5f;
- const float e_c = sinf(eidx*M_PI - M_PI_2)*0.5f + 0.5f;
- s->imdct_win[i] = s_c / ((s_c * s_c) + (e_c * e_c));
- }
-
- /* Allocation curve */
- alloc_c_len = FF_ARRAY_ELEMS(at9_tab_b_dist);
- for (int i = 1; i <= alloc_c_len; i++)
- for (int j = 0; j < i; j++)
- s->alloc_curve[i - 1][j] = at9_tab_b_dist[(j * alloc_c_len) / i];
-
- ff_thread_once(&static_table_init, atrac9_init_static);
-
- return 0;
-}
-
-const FFCodec ff_atrac9_decoder = {
- .p.name = "atrac9",
- CODEC_LONG_NAME("ATRAC9 (Adaptive TRansform Acoustic Coding 9)"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_ATRAC9,
- .priv_data_size = sizeof(ATRAC9Context),
- .init = atrac9_decode_init,
- .close = atrac9_decode_close,
- FF_CODEC_DECODE_CB(atrac9_decode_frame),
- .flush = atrac9_decode_flush,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
- .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Arena Breakout 32 Bit APK A Thrilling and Challenging Game for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Arena Breakout 32 Bit APK A Thrilling and Challenging Game for Android.md
deleted file mode 100644
index 05170bd6b69f816833e92297bdb344b7deaffb71..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Arena Breakout 32 Bit APK A Thrilling and Challenging Game for Android.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-
Arena Breakout 32 Bit APK: A New Action Game from Tencent
-
If you are looking for a new and exciting action game to play on your Android device, you might want to check out Arena Breakout 32 Bit APK. This is a game developed by Tencent Games, the same company behind popular titles like PUBG Mobile, Call of Duty Mobile, and Honor of Kings. In this article, we will tell you what Arena Breakout is, how to download and install it, and why you should play it.
What is Arena Breakout?
Arena Breakout is a game that combines elements of shooting, survival, and exploration. It is set in a post-apocalyptic world where you have to fight against other players and zombies in various modes and maps. You can choose from different characters, weapons, and skills to customize your gameplay style. You can also team up with your friends or join random matches online.
-
A brief introduction to the game and its features
-
The game has four main modes: Solo, Duo, Squad, and Zombie. In Solo mode, you have to survive as long as possible against other players in a shrinking map. In Duo and Squad modes, you can team up with one or three other players respectively and cooperate to eliminate your enemies. In Zombie mode, you have to defend yourself from waves of zombies while collecting resources and completing objectives.
-
The game also has various maps that offer different environments and challenges. Some of the maps are: City Ruins, Desert Oasis, Snowy Mountain, Forest Camp, and Nuclear Plant. Each map has its own terrain, weather, loot, and hazards that you have to deal with.
-
The game also has a rich selection of characters, weapons, and skills that you can unlock and upgrade as you play. There are six characters to choose from: Hunter, Sniper, Medic, Engineer, Assault, and Support. Each character has its own unique abilities and stats that affect your performance in the game. There are also dozens of weapons to choose from, ranging from pistols, rifles, shotguns, SMGs, snipers, rocket launchers, grenades, and more. You can also equip different skills that give you passive or active benefits in the game.
-
How to download and install Arena Breakout 32 Bit APK on your Android device
-
If you want to play Arena Breakout on your Android device, you will need to download and install the 32 Bit APK file. This is because the game is not available on the Google Play Store yet. To do this, you will need to follow these steps:
Download the Arena Breakout 32 Bit APK file from a trusted source and save it to your device.
Once the download is complete, locate the file on your device and tap on it.
-
You might see a warning message that says "Install blocked" or "Unknown sources". This is because you are installing an app from outside the Google Play Store. To proceed, you will need to enable the option to install apps from unknown sources. You can do this by going to your device settings > security > unknown sources > allow.
-
After enabling the option, go back to the APK file and tap on it again.
-
You will see a screen that shows the app permissions and information. Tap on "Install" to start the installation process.
-
Wait for a few seconds until the installation is complete. You will see a message that says "App installed". Tap on "Open" to launch the game.
-
-
Congratulations! You have successfully installed Arena Breakout 32 Bit APK on your Android device. You can now enjoy playing the game.
-
arena breakout android game download
-arena breakout apk for pc
-arena breakout apk latest version
-arena breakout apk mod
-arena breakout apk obb
-arena breakout apk offline
-arena breakout apk update
-arena breakout app store
-arena breakout bluestacks emulator
-arena breakout by tencent games
-arena breakout cheats and hacks
-arena breakout chinese version
-arena breakout english version
-arena breakout free fire mode
-arena breakout gameplay and review
-arena breakout google play store
-arena breakout graphics settings
-arena breakout guide and tips
-arena breakout how to install
-arena breakout how to play
-arena breakout ios game download
-arena breakout mobile game apk
-arena breakout new features and events
-arena breakout online multiplayer mode
-arena breakout realistic shooter game
-arena breakout system requirements
-arena breakout trailer and screenshots
-arena breakout unlimited money and gems
-arena breakout weapons and skins
-download arena breakout 32 bit apk free
-download arena breakout 32 bit apk full version
-download arena breakout 32 bit apk latest update
-download arena breakout 32 bit apk no ads
-download arena breakout 32 bit apk no root
-download arena breakout 32 bit apk no virus
-download arena breakout 32 bit apk on windows 10
-download arena breakout 32 bit apk premium unlocked
-download arena breakout 32 bit apk pro modded
-download arena breakout 32 bit apk safe and secure
-download arena breakout 32 bit apk with data file
-how to download and play arena breakout 32 bit apk on mac os x
-how to download and play arena breakout 32 bit apk on linux ubuntu
-how to fix lag and crash issues in arena breakout 32 bit apk
-how to get free rewards and codes in arena breakout 32 bit apk
-how to join a clan and chat with friends in arena breakout 32 bit apk
-how to rank up and level up fast in arena breakout 32 bit apk
-how to stream and record your gameplay of arena breakout 32 bit apk
-what is the best strategy and loadout for winning in arena breakout 32 bit apk
-what is the difference between the global and the chinese version of the game
-
Tips and tricks for playing Arena Breakout
-
Now that you have installed Arena Breakout 32 Bit APK on your device, you might want to know some tips and tricks to improve your gameplay and have more fun. Here are some of them:
-
-
Choose your character wisely. Each character has its own strengths and weaknesses, so you should pick the one that suits your playstyle and strategy. For example, if you like to snipe from afar, you might want to choose the Sniper character. If you like to heal and support your teammates, you might want to choose the Medic character.
-
Upgrade your weapons and skills. As you play the game, you will earn coins and gems that you can use to upgrade your weapons and skills. Upgrading your weapons will increase their damage, accuracy, fire rate, and magazine size. Upgrading your skills will enhance their effects and reduce their cooldowns. You can also buy new weapons and skills from the shop using coins and gems.
-
Use cover and movement. The game is fast-paced and action-packed, so you need to be alert and agile at all times. You should use cover to avoid enemy fire and move around the map to find better positions and angles. You should also use the sprint, jump, and slide buttons to dodge bullets and obstacles.
-
Communicate and cooperate with your teammates. The game is more fun and rewarding when you play with your friends or other players online. You can use the voice chat or text chat features to communicate with your teammates and coordinate your actions. You can also use the ping system to mark enemies, locations, items, and objectives.
-
Complete missions and achievements. The game has various missions and achievements that you can complete to earn extra coins, gems, and rewards. Some of the missions are daily, weekly, or seasonal, while some of the achievements are permanent. You can check your progress and claim your rewards from the mission menu.
-
-
Why should you play Arena Breakout?
-
You might be wondering why you should play Arena Breakout 32 Bit APK instead of other action games on the market. Well, here are some reasons why:
-
The advantages of playing Arena Breakout 32 Bit APK
-
One of the main advantages of playing Arena Breakout 32 Bit APK is that it is compatible with most Android devices, even those with low specifications. The game has a 32-bit version that runs smoothly on devices with 2 GB of RAM or less. The game also has a 64-bit version that offers better graphics and performance on devices with 4 GB of RAM or more.
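If you are unsure whether your device needs the 32-bit or the 64-bit build, one way to check is to read the device's supported ABIs over adb (arm64-v8a indicates 64-bit support, armeabi-v7a indicates 32-bit). This is a minimal sketch, assuming the Android platform tools are installed and a device is connected with USB debugging enabled:

```python
import subprocess

# Query the primary ABI and the full list of ABIs the attached device supports.
for prop in ("ro.product.cpu.abi", "ro.product.cpu.abilist"):
    out = subprocess.run(
        ["adb", "shell", "getprop", prop],
        capture_output=True,
        text=True,
    ).stdout.strip()
    print(f"{prop}: {out}")
```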
-
Another advantage of playing Arena Breakout 32 Bit APK is that it is free to play and download. You don't need to pay anything to enjoy the game, as it is supported by ads and in-app purchases. You can also play the game offline without an internet connection, as long as you have downloaded the APK file beforehand.
-
A third advantage of playing Arena Breakout 32 Bit APK is that it is constantly updated and improved by Tencent Games, one of the leading game developers in the world. The game receives regular updates that add new features, modes, maps, characters, weapons, skills, missions, achievements, events, and more. The game also has a dedicated customer service team that responds to your feedback and queries.
-
The challenges and rewards of playing Arena Breakout
-
Playing Arena Breakout 32 Bit APK is not only fun but also challenging and rewarding. The game offers a variety of modes and maps that test your skills and strategies in different scenarios. You have to face other players who have different characters, weapons, and skills than you. You also have to deal with zombies who are fast, strong, and relentless.
-
The game also offers a lot of rewards for playing well and completing tasks. You can earn coins and gems that you can use to unlock new weapons, skills, and features.
What is the difference between the 32-bit and 64-bit versions of the game?
The 64-bit version has higher graphics and performance than the 32-bit version. You can choose the version that suits your device best.
-
Is the game safe and secure to download and play?
-
Yes, the game is safe and secure to download and play. The game is developed by Tencent Games, a reputable and trustworthy game developer. The game is also scanned and verified by APKPure, a reliable and trusted source for APK files. The game does not contain any viruses, malware, or spyware that can harm your device or data.
-
How can I play the game with my friends?
-
You can play the game with your friends by inviting them to join your team or by joining their team. You can do this by tapping on the "Team" button on the main menu and then choosing the "Invite" or "Join" option. You can also use the voice chat or text chat features to communicate with your friends while playing.
-
How can I get more coins and gems in the game?
-
You can get more coins and gems in the game by playing well and completing tasks. You can earn coins and gems by killing enemies, surviving longer, winning matches, completing missions, achieving achievements, participating in events, and opening crates. You can also buy coins and gems from the shop using real money.
-
How can I contact the customer service team of the game?
-
You can contact the customer service team of the game by tapping on the "Settings" button on the main menu and then choosing the "Customer Service" option. You can then send your feedback, queries, or complaints to the team via email or online chat. The team will respond to you as soon as possible.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Experience the Thrill of Cars Fast as Lightning on Your Android Device.md b/spaces/congsaPfin/Manga-OCR/logs/Experience the Thrill of Cars Fast as Lightning on Your Android Device.md
deleted file mode 100644
index 399febcfb180084d364f70192b033b45794dee16..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Experience the Thrill of Cars Fast as Lightning on Your Android Device.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
Download Cars Fast as Lightning: A Fun and Exciting Racing Game for Mobile
-
Do you love cars, speed, and adventure? If so, you will love Cars Fast as Lightning, a racing game for mobile based on the popular Cars movie franchise. In this game, you can join Lightning McQueen, Mater, and other characters from Radiator Springs in a racing extravaganza that will keep you entertained for hours. You can customize your car, race against other players online or offline, build your own Radiator Springs in 3D, and watch fully animated cutscenes with voice acting by Owen Wilson and other stars. Whether you are a fan of the movies or not, you will find something to enjoy in this game. Here is everything you need to know about how to download, install, play, and win in Cars Fast as Lightning.
How to Download and Install Cars Fast as Lightning on Your Android Device
-
Downloading and installing Cars Fast as Lightning on your Android device is very easy. Just follow these simple steps:
-
-
Go to Google Play Store on your device and search for "Cars Fast as Lightning" or use this link.
-
Tap on the Install button and wait for the download to finish. The game is about 800 MB in size, so make sure you have enough space on your device.
-
Open the game and enjoy the racing action. You may need to grant some permissions to the game, such as access to your storage, location, contacts, etc.
-
-
How to Play Cars Fast as Lightning and Win Races
-
Playing Cars Fast as Lightning is very fun and easy. Here are some steps to help you get started:
-
-
Choose your favorite character from the Cars movie franchise. You can start with Lightning McQueen or Mater, but you can also unlock other characters such as Francesco Bernoulli, Sally Carrera, Doc Hudson, etc. Each character has their own personality, voice, and stats.
-
Customize your car with different paint jobs and upgrades. You can change the color, design, stickers, wheels, spoilers, etc. of your car. You can also upgrade your engine, tires, suspension, etc. to improve your speed, acceleration, handling, etc.
-
Race against other players online or offline in various modes and tracks. You can race in the Story mode, where you follow the plot of the movies and face different challenges and opponents. You can also race in the Grand Prix mode, where you compete in tournaments and cups. You can also race in the Time Trial mode, where you try to beat your own or other players' records. You can choose from different tracks, such as Radiator Springs, Tokyo, Porto Corsa, etc.
-
Use the TouchDrive control system or other options to steer, drift, and boost your car. The TouchDrive system allows you to control your car by tapping on the left or right side of the screen. You can also use the tilt or swipe options if you prefer. You can drift by tapping and holding on the screen while turning. You can boost by tapping on the lightning bolt icon when it is full.
-
Collect lightning bolts and coins to unlock new cars and features. Lightning bolts are used to fill up your boost meter and also to unlock new characters and tracks. Coins are used to buy paint jobs and upgrades for your car. You can earn both by racing, completing quests, watching ads, etc.
-
-
Tips and Tricks to Improve Your Performance in Cars Fast as Lightning
-
If you want to become a better racer in Cars Fast as Lightning, here are some tips and tricks that you should know:
-
-
Watch the cutscenes to learn more about the story and characters. The cutscenes are fully animated and voiced by the original actors from the movies. They are not only entertaining but also informative. They will give you hints and tips on how to win races and unlock new features.
-
Use nitro wisely and save it for the right moments. Nitro is a powerful boost that can help you gain speed and overtake your opponents. However, it is also limited and takes time to recharge. Therefore, you should use it strategically and not waste it. For example, you can use it at the start of the race, at the end of the race, or when you need to catch up with someone.
-
Follow the optimal route and avoid obstacles and traps. Each track has its own layout and features that can help or hinder your progress. You should pay attention to the signs and arrows that show you the best way to go. You should also avoid hitting obstacles such as barrels, cones, rocks, etc. that can slow you down or damage your car. You should also watch out for traps such as oil spills, water puddles, ramps, etc. that can make you lose control or fly off the track.
-
Complete quests and achievements to earn extra rewards. Quests are tasks that you can complete while racing, such as drifting for a certain distance, hitting a certain number of opponents, finishing a race in a certain time, etc. Achievements are milestones that you can reach by playing the game, such as winning a certain number of races, unlocking a certain number of characters, building a certain number of buildings in Radiator Springs, etc. Both quests and achievements will reward you with lightning bolts and coins that you can use to unlock more content.
-
Build your own Radiator Springs in 3D and invite your friends to visit. Besides racing, you can also have fun building your own version of Radiator Springs in 3D. You can place buildings such as Flo's V8 Cafe, Luigi's Casa Della Tires, Ramone's House of Body Art, etc. You can also decorate your town with trees, flowers, statues, etc. You can invite your friends to visit your town and see what you have created. You can also visit their towns and see what they have done.
-
-
Alternatives to Cars Fast as Lightning for More Racing Fun
-
If you love racing games but want to try something different from Cars Fast as Lightning, here are some alternatives that you may like:
-
download cars fast as lightning game for pc
-download cars fast as lightning mod apk
-download cars fast as lightning android
-download cars fast as lightning apk + data
-download cars fast as lightning hack
-download cars fast as lightning offline
-download cars fast as lightning latest version
-download cars fast as lightning for windows 10
-download cars fast as lightning unlimited money
-download cars fast as lightning gameloft
-download cars fast as lightning cheats
-download cars fast as lightning online
-download cars fast as lightning free
-download cars fast as lightning full version
-download cars fast as lightning update
-download cars fast as lightning bluestacks
-download cars fast as lightning apkcombo
-download cars fast as lightning racing game
-download cars fast as lightning disney pixar
-download cars fast as lightning 3d
-download cars fast as lightning play store
-download cars fast as lightning app
-download cars fast as lightning ios
-download cars fast as lightning obb file
-download cars fast as lightning revdl
-download cars fast as lightning mac
-download cars fast as lightning apk pure
-download cars fast as lightning bestgames.com
-download cars fast as lightning emulator
-download cars fast as lightning for laptop
-download cars fast as lightning apk mirror
-download cars fast as lightning no ads
-download cars fast as lightning newscientist.com
-download cars fast as lightning the-sun.com
-download cars fast as lightning yahoo.com
-download cars fast as lightning wikihow.com
-download cars fast as lightning youtube.com
-download cars fast as lightning facebook.com
-download cars fast as lightning twitter.com
-download cars fast as lightning instagram.com
-download cars fast as lightning reddit.com
-download cars fast as lightning quora.com
-download cars fast as lightning pinterest.com
-download cars fast as lightning medium.com
-download cars fast as lightning wordpress.com
-download cars fast as lightning tumblr.com
-download cars fast as lightning linkedin.com
-download cars fast as lightning slideshare.net
-
-
CSR 2 Racing: This is a realistic drag racing game with stunning graphics and customization options. You can collect over 200 cars from top brands such as Ferrari, Lamborghini, McLaren, Bugatti, etc. You can also customize your car with paint, decals, wheels, etc. You can race against other players online or offline in various modes and events. You can also join a crew and compete for prizes and glory.
-
Hot Lap League: Racing Mania!: This is a challenging racing simulation with no driving assists and over 150 tracks. You can choose from different cars, such as sports cars, muscle cars, supercars, etc. You can also tune your car with engine, suspension, brakes, etc. You can race against the AI or other players online or offline in various modes and seasons. You can also create your own tracks and share them with the community.
-
NASCAR Heat Mobile: This is a professional racing game where you can compete in NASCAR Cups and build your own fan zone. You can choose from over 40 official NASCAR drivers and teams, such as Kyle Busch, Chase Elliott, Joey Logano, etc. You can also customize your car with paint schemes, sponsors, numbers, etc. You can race on 23 licensed NASCAR tracks, such as Daytona, Talladega, Bristol, etc. You can also build your own fan zone with buildings, attractions, shops, etc. and earn money and fame.
-
-
Conclusion
-
Cars Fast as Lightning is a great racing game for mobile that offers fun, excitement, and customization. You can join Lightning McQueen, Mater, and other characters from the Cars movie franchise in a racing extravaganza that will keep you entertained for hours. You can customize your car, race against other players online or offline, build your own Radiator Springs in 3D, and watch fully animated cutscenes with voice acting by Owen Wilson and other stars. Whether you are a fan of the movies or not, you will find something to enjoy in this game.
-
If you are looking for a fun and exciting racing game for mobile, you should download Cars Fast as Lightning today and join the racing action with Lightning McQueen and Mater. You will not regret it!
-
Do you have any questions or comments about the game? Feel free to share them in the comments section below. We would love to hear from you!
-
FAQs
-
Here are some frequently asked questions about Cars Fast as Lightning:
-
-
Is Cars Fast as Lightning free to play?
-
Yes, Cars Fast as Lightning is free to play. However, it also contains in-app purchases that allow you to buy lightning bolts and coins with real money. You can disable this feature in your device settings if you wish.
-
Is Cars Fast as Lightning compatible with my device?
-
Cars Fast as Lightning requires Android 4.0 or higher to run. It also requires at least 800 MB of free space on your device. You can check the compatibility of your device on the Google Play Store page of the game.
-
How can I contact the developers of Cars Fast as Lightning?
-
If you have any issues or feedback about the game, you can contact the developers of Cars Fast as Lightning by using the following methods:
Can I play Cars Fast as Lightning offline?
You can play Cars Fast as Lightning offline by turning off your internet connection before launching the game. However, some features of the game may not be available offline, such as online races, leaderboards, achievements, etc.
-
How can I backup my progress in Cars Fast as Lightning?
-
You can backup your progress in Cars Fast as Lightning by connecting your game to your Google Play Games account. This will allow you to sync your progress across multiple devices and restore it if you lose or change your device.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download APKs from Trusted Sources and Avoid Malware.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download APKs from Trusted Sources and Avoid Malware.md
deleted file mode 100644
index f6c1e593490f563ccdae49489d5f34f8e9b01a80..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Download APKs from Trusted Sources and Avoid Malware.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
How to Download and Install APKs on Android Devices
-
If you are an Android user, you might have heard of APK files and wondered how to download and install them on your device. APK files are a way of installing apps that are not available on the Google Play Store, or of getting early access to new features or updates. In this article, we will explain what APK files are, how to download them from safe and secure sites, how to install them on your Android device, and what the advantages and disadvantages of using them are.
What is an APK file?
An APK file is a package file that contains all the elements that an app needs to install correctly on your device. It is similar to the EXE file format used for Windows applications. An APK file can include code, resources, assets, certificates, and manifest information.
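Since an APK is packaged as a standard ZIP archive, you can see these pieces for yourself by listing the entries of any APK you already have. The sketch below is a minimal illustration; the file name is a placeholder, not a real app.

```python
import zipfile

apk_path = "example.apk"  # placeholder -- point this at any APK file you have

with zipfile.ZipFile(apk_path) as apk:
    # Typical entries: AndroidManifest.xml, classes.dex, resources.arsc,
    # res/... (resources and assets) and META-INF/... (signing certificates).
    for name in apk.namelist():
        print(name)
```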
-
APK files are used to install apps that are not available on Google Play Store
-
Sometimes, you might want to install an app that is not available in your region or country due to geo-restrictions or licensing issues. For example, you might want to use a VPN app that is banned in your location, or a streaming app that is not supported in your area. In such cases, you can use an APK file to sideload the app on your device.
-
APK files can also offer early access to new features or updates
-
Another reason why you might want to use an APK file is to get access to beta versions or updates of apps before they are officially released on the Google Play Store. For example, you might want to try out a new feature or bug fix that is not yet available for everyone. Some app developers offer APK files for testing purposes or for users who want to join their beta programs.
-
How to download APKs from safe and secure sites?
-
Use reputable sources like APKMirror, APKPure, Uptodown, Aptoide, etc.
-
The first step to download an APK file is to find a reliable source that offers verified and malware-free files. There are many websites that claim to offer APK files for download, but some of them might be malicious or fraudulent. Therefore, you should always use trusted sources like APKMirror, APKPure, Uptodown, Aptoide, etc. These sites have strict policies and procedures to ensure the quality and safety of the APK files they offer. You can also check the ratings, reviews, and comments of other users to get an idea of the app's performance and functionality.
-
Check the app permissions, ratings, reviews, and signatures before downloading
-
Before you download an APK file, you should always check the app permissions, ratings, reviews, and signatures to make sure that the app is legitimate and does not pose any security risks. App permissions are the requests that an app makes to access certain features or data on your device, such as your camera, microphone, contacts, location, etc. You should only grant permissions that are relevant and necessary for the app's functionality. Ratings and reviews are the feedback that other users have given to the app based on their experience and satisfaction. You should look for apps that have high ratings and positive reviews from reputable sources. Signatures are the digital certificates that verify the identity and authenticity of the app developer. You should only download APK files that have valid signatures from trusted developers.
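For the signature check mentioned above, the `apksigner` tool from the Android SDK build-tools can print the signing certificates of a downloaded APK so you can compare them with the developer's known certificate. A minimal sketch, assuming apksigner is on your PATH and the APK path is a placeholder:

```python
import subprocess

apk_path = "example.apk"  # placeholder path to the downloaded APK

# "apksigner verify --print-certs" validates the APK signature and prints
# the certificate digests of whoever signed the package.
result = subprocess.run(
    ["apksigner", "verify", "--print-certs", apk_path],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```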
-
Scan the APK files for viruses or malware using anti-malware software
-
Even if you download an APK file from a reputable source, you should still scan it for viruses or malware using anti-malware software before installing it on your device. Viruses or malware are malicious programs that can harm your device or steal your personal information. Anti-malware software are applications that can detect and remove viruses or malware from your device. You can use anti-malware software like Malwarebytes, Avast, Kaspersky, etc. to scan the APK files for any potential threats.
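In addition to an anti-malware scan, you can compare a checksum of the downloaded file against the hash published by the download site, if one is provided. A minimal sketch; the file path and expected hash below are placeholders:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

apk_path = "example.apk"           # placeholder path
expected = "<hash from the site>"  # placeholder published hash

actual = sha256_of(apk_path)
print("checksum OK" if actual == expected else f"checksum mismatch: {actual}")
```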
-
download apks for android
-download apks from google play
-download apks on pc
-download apks without play store
-download apks for firestick
-download apks for ios
-download apks for windows 10
-download apks free online
-download apks to sd card
-download apks on chromebook
-download apks safely and securely
-download apks of paid apps
-download apks with obb files
-download apks of games
-download apks modded and hacked
-download apks directly from website
-download apks using qr code
-download apks in bulk
-download apks faster and easier
-download apks latest version
-download apks not available in your country
-download apks of old versions
-download apks of apps removed from play store
-download apks of premium apps for free
-download apks of cracked apps
-download apks of apps that require root
-download apks of apps that are compatible with your device
-download apks of apps that are ad-free
-download apks of apps that are virus-free
-download apks of apps that are updated regularly
-download apks of apps that have good reviews and ratings
-download apks of apps that have more features and functions
-download apks of apps that have no in-app purchases
-download apks of apps that have offline mode
-download apks of apps that have dark mode
-download apks of apps that have widgets and shortcuts
-download apks of apps that have backup and restore options
-download apks of apps that have sync and cloud support
-download apks of apps that have multi-language support
-download apks of apps that have customization and personalization options
-how to download apks on android tv box
-how to download apks on macbook pro
-how to download apks on samsung smart tv
-how to download apks on kindle fire hd
-how to download apks on bluestacks
-how to download apks on nox player
-how to download apks on iphone
-how to download apks on ipad
-how to download apks on linux
-how to download apk files from apk mirror
-
How to install APKs on your Android device?
-
Enable the option to install unknown apps from your device settings
-
By default, Android devices do not allow you to install apps from unknown sources, which are sources other than the Google Play Store. This is a security measure to prevent you from installing harmful or unauthorized apps on your device. However, if you want to install an APK file that you have downloaded from a safe and secure site, you need to enable the option to install unknown apps from your device settings. To do this, follow these steps:
-
-
Go to your device settings and tap on Apps & notifications.
-
Tap on Advanced and then on Special app access.
-
Tap on Install unknown apps and select the app that you want to use to install the APK file, such as your browser or file manager.
-
Toggle on the Allow from this source option.
-
-
Note: The steps may vary depending on your device model and Android version.
-
Use a file manager app to locate and open the APK file
-
Once you have enabled the option to install unknown apps, you need to use a file manager app to locate and open the APK file that you have downloaded. A file manager app is an app that allows you to browse and manage the files and folders on your device. You can use a file manager app like Files by Google, ES File Explorer, Solid Explorer, etc. To install an APK file using a file manager app, follow these steps:
-
-
Open the file manager app and navigate to the folder where you have saved the APK file.
-
Tap on the APK file to open it.
-
If prompted, tap on Settings and then on Allow from this source.
-
Tap on Install and wait for the installation process to complete.
-
Tap on Open to launch the app or Done to exit.
-
-
Follow the installation prompts and grant the required permissions
-
When you install an APK file on your device, you might see some installation prompts that ask you to confirm or agree to certain terms or conditions. For example, you might see a prompt that asks you to verify your age, accept the privacy policy, or agree to the terms of service of the app. You should read these prompts carefully and only proceed if you are comfortable with them. You might also see some prompts that ask you to grant certain permissions to the app, such as access to your camera, microphone, contacts, location, etc. You should only grant permissions that are relevant and necessary for the app's functionality. You can always change or revoke these permissions later from your device settings.
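If you prefer to sideload from a computer rather than a file manager on the phone, the same installation can be done over adb. A minimal sketch, assuming the Android platform tools are installed, USB debugging is enabled on the device, and the APK path is a placeholder:

```python
import subprocess

apk_path = "example.apk"  # placeholder path to the APK on your computer

# "adb install -r" installs the package, replacing any existing copy.
result = subprocess.run(
    ["adb", "install", "-r", apk_path],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```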
-
What are the advantages and disadvantages of using APKs?
-
Advantages include getting access to apps that are not available in your region, getting beta versions or updates faster, and saving storage space
-
One of the main advantages of using APK files is that they allow you to get access to apps that are not available in your region or country due to geo-restrictions or licensing issues. For example, you can use a VPN app that is banned in your location, or a streaming app that is not supported in your area. Another advantage of using APK files is that they allow you to get access to beta versions or updates of apps before they are officially released on the Google Play Store. For example, you can try out a new feature or bug fix that is not yet available for everyone. Some app developers offer APK files for testing purposes or for users who want to join their beta programs. A third advantage of using APK files is that they can help you save storage space on your device. Some APK files are smaller than the app files on the Google Play Store, as they do not include unnecessary data or resources. This can free up some space on your device and improve its performance.
-
Disadvantages include risking your device security, violating app developers' rights, and losing app support or compatibility
-
One of the main disadvantages of using APK files is that they can pose a risk to your device security. Some APK files might contain viruses or malware that can harm your device or steal your personal information. Even if you download an APK file from a reputable source, you should still scan it for viruses or malware using anti-malware software before installing it on your device. Another disadvantage of using APK files is that they can violate the app developers' rights and intellectual property. Some APK files might be pirated or cracked versions of paid apps, which can deprive the app developers of their revenue and recognition. This can also affect the quality and innovation of the apps, as the developers might lose motivation or resources to improve their products. A third disadvantage of using APK files is that they can cause app support or compatibility issues. Some APK files might not work properly on your device, as they might not be compatible with your device model, Android version, or other apps. This can result in crashes, errors, or glitches that can affect your user experience. Moreover, some APK files might not receive regular updates or bug fixes from the app developers, which can make them outdated or vulnerable.
-
Conclusion
-
In conclusion, APK files are a way of installing apps that are not available on the Google Play Store, or getting early access to new features or updates. However, they also come with some risks and drawbacks that you should be aware of before using them. To download and install APK files safely and responsibly, you should follow these tips:
-
-
Use reputable sources like APKMirror, APKPure, Uptodown, Aptoide, etc.
-
Check the app permissions, ratings, reviews, and signatures before downloading.
-
Scan the APK files for viruses or malware using anti-malware software.
-
Enable the option to install unknown apps from your device settings.
-
Use a file manager app to locate and open the APK file.
-
Follow the installation prompts and grant the required permissions.
-
Do not download or install pirated or cracked versions of paid apps.
-
Do not download or install apps that are illegal or unethical in your region or country.
-
Do not download or install apps that you do not trust or need.
-
-
We hope this article has helped you understand how to download and install APKs on Android devices. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
Frequently Asked Questions
-
What is an APK file?
-
An APK file is a package file that contains all the elements that an app needs to install correctly on an Android device. It is similar to the EXE file format used for Windows applications.
-
Why do I need an APK file?
-
You might need an APK file to install an app that is not available on the Google Play Store, or to get early access to a beta version or update of an app.
-
How do I download an APK file?
-
You can download an APK file from a reputable source like APKMirror, APKPure, Uptodown, Aptoide, etc. You should also check the app permissions, ratings, reviews, and signatures before downloading.
-
How do I install an APK file?
-
You need to enable the option to install unknown apps from your device settings, use a file manager app to locate and open the APK file, and follow the installation prompts and grant the required permissions.
-
What are the risks of using an APK file?
-
Some of the risks of using an APK file are risking your device security, violating app developers' rights, and losing app support or compatibility. You should always scan the APK files for viruses or malware, avoid pirated or cracked versions of paid apps, and only download apps that are legal and ethical.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Instagram APK 3.2.0 - Enjoy New Features and Improvements on Your Android.md b/spaces/congsaPfin/Manga-OCR/logs/Instagram APK 3.2.0 - Enjoy New Features and Improvements on Your Android.md
deleted file mode 100644
index 5c9a91cefe2f3322c316e3fad8fbf6839a721e3d..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Instagram APK 3.2.0 - Enjoy New Features and Improvements on Your Android.md
+++ /dev/null
@@ -1,134 +0,0 @@
-
-
Instagram APK 3.2.0: What You Need to Know
-
Instagram is one of the most popular social media platforms in the world, with over 1 billion monthly active users. It allows you to create and share your photos, stories, reels and videos with the friends and followers you care about. You can also connect with people, explore the community, and discover new content that matches your interests.
-
But what if you want to access the latest features and updates of Instagram before they are officially released on the Google Play Store? Or what if you want to customize your app settings and preferences according to your device specifications? Or what if you want to have more control over your security and privacy settings?
In that case, you might want to download and install the Instagram APK 3.2.0 file on your Android device. In this article, we will explain what is an Instagram APK file, why you might need it, how to download and install it, how to use it, and what are the benefits and drawbacks of using it.
-
What is Instagram APK?
-
What is an APK file?
-
An APK file is an Android Package Kit file that contains all the files and code needed to run an Android app on your device. It is similar to an EXE file for Windows or a DMG file for Mac. You can download an APK file from various sources online, such as official websites, third-party app stores, or file-sharing platforms.
-
Why do you need Instagram APK?
-
You might need an Instagram APK file for various reasons, such as:
-
-
You want to access the latest features and updates of Instagram before they are officially released on the Google Play Store.
-
You want to customize your app settings and preferences according to your device specifications.
-
You want to have more control over your security and privacy settings.
-
You want to bypass the regional restrictions or censorship imposed by your country or network provider.
-
You want to backup or restore your app data in case of accidental deletion or device loss.
-
-
How to download and install Instagram APK 3.2.0
-
Download from Google Play Store
-
The easiest way to download and install Instagram APK 3.2.0 is from the Google Play Store. You can simply follow these steps:
-
-
Open the Google Play Store app on your device.
-
Search for "Instagram" in the search bar.
-
Select the app from the list of results.
-
Tap on "Update" if there is a new version available.
-
Wait for the download and installation process to complete.
-
Launch the app and enjoy!
-
-
Download from APKCombo
-
If you want to download and install Instagram APK 3.2.0 from a third-party app store, we recommend using APKCombo. It is a reliable and safe source that offers various versions of Instagram APK files for different devices and Android versions. You can follow these steps:
-
instagram apk 3.2.0 download
-instagram apk 3.2.0 free
-instagram apk 3.2.0 latest version
-instagram apk 3.2.0 mod
-instagram apk 3.2.0 old version
-instagram apk 3.2.0 for android
-instagram apk 3.2.0 update
-instagram apk 3.2.0 beta
-instagram apk 3.2.0 cracked
-instagram apk 3.2.0 premium
-instagram apk 3.2.0 offline
-instagram apk 3.2.0 pro
-instagram apk 3.2.0 hack
-instagram apk 3.2.0 dark mode
-instagram apk 3.2.0 no ads
-instagram apk 3.2.0 plus
-instagram apk 3.2.0 lite
-instagram apk 3.2.0 original
-instagram apk 3.2.0 new features
-instagram apk 3.2.0 review
-instagram apk 3.2.0 direct link
-instagram apk 3.2.0 mirror
-instagram apk 3.2.0 alternative
-instagram apk 3.2.0 safe
-instagram apk 3.2.0 bug fixes
-instagram apk 3.2.0 full version
-instagram apk 3.2.0 unlocked
-instagram apk 3.2.0 unlimited likes
-instagram apk 3.2.0 video downloader
-instagram apk 3.2.0 story saver
-instagram apk 3.2.0 repost app
-instagram apk 3.2.0 analytics tool
-instagram apk 3.2.0 photo editor
-instagram apk 3.2.0 filters pack
-instagram apk 3.2.0 stickers pack
-instagram apk 3.2.0 fonts pack
-instagram apk 3.2.0 layout app
-instagram apk 3.2
Download from other sources
If you want to download and install Instagram APK 3.2.0 from other sources, you need to be careful and cautious. Some sources may contain malware, viruses, or fake files that can harm your device or compromise your data. You should always check the reviews, ratings, and permissions of the app before downloading it. You can follow these general steps:
-
-
Find a reputable source that offers Instagram APK 3.2.0 file.
-
Download the file to your device.
-
Open the file manager and locate the file.
-
Tap on the file and select "Install".
-
Allow the installation of unknown sources if prompted.
-
Launch the app and enjoy!
-
-
How to use Instagram APK 3.2.0
-
Create and share your photos, stories, reels and videos
-
Instagram APK 3.2.0 allows you to create and share your photos, stories, reels and videos with the friends and followers you care about. You can use various filters, stickers, effects, and editing tools to make your content more attractive and engaging. You can also add hashtags, captions, locations, and tags to your posts to increase your visibility and reach.
-
Connect with friends and followers
-
Instagram APK 3.2.0 allows you to connect with friends and followers through direct messages, comments, likes, and live streams. You can also join groups, chats, and rooms to chat with people who share your interests. You can also follow your favorite celebrities, influencers, brands, and creators to see their updates and stories.
-
Explore the community and discover new content
-
Instagram APK 3.2.0 allows you to explore the community and discover new content that matches your interests. You can browse through different categories, such as music, sports, fashion, beauty, art, travel, food, and more. You can also use the search function to find specific topics, accounts, hashtags, or locations. You can also check out the Explore tab to see what's trending and popular on Instagram.
-
Benefits and drawbacks of Instagram APK 3.2.0
-
Benefits
-
Latest features and updates
-
One of the main benefits of using Instagram APK 3.2.0 is that you can access the latest features and updates of Instagram before they are officially released on the Google Play Store. This way, you can enjoy the new functions and improvements of the app without waiting for the official update.
-
Customization and compatibility
-
Another benefit of using Instagram APK 3.2.0 is that you can customize your app settings and preferences according to your device specifications. You can adjust the resolution, quality, speed, language, theme, notifications, and other options of the app to suit your needs and preferences. You can also make sure that the app is compatible with your device model and Android version.
-
Security and privacy
-
A third benefit of using Instagram APK 3.2.0 is that you can have more control over your security and privacy settings. You can choose who can see your posts, stories, reels and videos, who can send you messages or requests, who can comment or like your content, who can tag you or mention you in their posts, who can see your activity status or online presence, who can access your location or contacts information, and more.
-
Drawbacks
-
Risk of malware and viruses
-
One of the main drawbacks of using Instagram APK 3.2.0 is that you may expose your device or data to malware or viruses if you download it from untrusted or unreliable sources. Some sources may contain malicious code or fake files that can infect your device or steal your data. You should always scan the file before installing it and use a reputable antivirus software to protect your device.
-
Potential legal issues
-
Another drawback of using Instagram APK 3.2.0 is that you may face potential legal issues if you violate the terms of service or policies of Instagram or Google Play Store. You may infringe the intellectual property rights or privacy rights of Instagram or other parties. You may also violate the regional laws or regulations of your country or network provider. You should always read and agree to the terms of service and policies of Instagram and Google Play Store before using the app.
-
Possible errors and bugs
-
A third drawback of using Instagram APK 3.2.0 is that you may encounter possible errors and bugs while using the app. Some features or functions may not work properly or as expected. Some files or data may be corrupted or lost. Some updates or patches may not be compatible with your device or Android version. You should always backup your app data and report any issues or feedback to the developers of Instagram.
-
Conclusion
-
Instagram APK 3.2.0 is a file that allows you to download and install the latest version of Instagram on your Android device. It has some benefits, such as accessing the latest features and updates, customizing your app settings and preferences, and having more control over your security and privacy settings. However, it also has some drawbacks, such as risking malware and viruses, facing potential legal issues, and encountering possible errors and bugs. You should weigh the pros and cons of using Instagram APK 3.2.0 before deciding to use it.
-
FAQs
-
Here are some frequently asked questions about Instagram APK 3.2.0:
-
-
Is Instagram APK 3.2.0 safe to use?
-
It depends on where you download it from. If you download it from a trusted and reliable source, such as Google Play Store or APKCombo, it is safe to use. However, if you download it from an untrusted or unreliable source, it may contain malware or viruses that can harm your device or data.
-
Is Instagram APK 3.2.0 legal to use?
-
It depends on the terms of service and policies of Instagram and Google Play Store, as well as the regional laws and regulations of your country or network provider. You should always read and agree to the terms of service and policies of Instagram and Google Play Store before using the app. You should also check the regional laws and regulations of your country or network provider before using the app.
-
Is Instagram APK 3.2.0 free to use?
-
Yes, Instagram APK 3.2.0 is free to use. However, you may need to pay for some in-app purchases or subscriptions to access some premium features or content on Instagram.
-
How do I update Instagram APK 3.2.0?
-
You can update Instagram APK 3.2.0 by downloading and installing the latest version of the file from the same source you downloaded it from before. Alternatively, you can update it from the Google Play Store if it is available there.
-
How do I uninstall Instagram APK 3.2.0?
-
You can uninstall Instagram APK 3.2.0 by following these steps:
-
-
Go to your device settings.
-
Select "Apps" or "Applications".
-
Find and select "Instagram".
-
Tap on "Uninstall" and confirm.
-
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Play Higgs Domino with One Piece Characters and Music APK.md b/spaces/congsaPfin/Manga-OCR/logs/Play Higgs Domino with One Piece Characters and Music APK.md
deleted file mode 100644
index d6999939ccdeca08e9878a41ac77106ba8db503b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Play Higgs Domino with One Piece Characters and Music APK.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
Download Higgs Domino Tema One Piece: How to Enjoy the Best of Both Worlds
-
If you are a fan of domino games and One Piece, you might be interested in downloading Higgs Domino Tema One Piece, a modded version of the popular online domino game that features the characters and settings from the famous manga and anime series. In this article, we will tell you what is Higgs Domino, what is One Piece, how to download and install the modded APK file, and some FAQs that you might have.
-
What is Higgs Domino?
-
A popular online domino game with various modes and features
-
Higgs Domino is a multiplayer game that features a variety of different game modes. The game modes include classic dominoes, draw dominoes, and block dominoes. The game also has a variety of different table sizes, including 2-player, 3-player, and 4-player tables.
A local Indonesian flavor with social elements and VIP privileges
-
Higgs Domino is primarily known for its domino games with a unique local Indonesian flavor. The app provides a multiplayer mode where you can play with friends or other players from around the world. Higgs Domino often incorporates social elements, allowing players to chat with each other, send gifts, and build connections within the gaming community. The game also offers VIP features that let you enjoy exclusive benefits such as special frames, effects, emoticons, and more.
-
What is One Piece?
-
A long-running manga and anime series about pirates and adventures
-
One Piece is a Japanese manga series written and illustrated by Eiichiro Oda. It has been serialized in Shueisha's shōnen manga magazine Weekly Shōnen Jump since July 1997, with its individual chapters compiled into 105 tankōbon volumes as of March 2023. The story follows the adventures of Monkey D. Luffy, a boy whose body gained the properties of rubber after unintentionally eating a Devil Fruit. With his crew of pirates, named the Straw Hat Pirates, Luffy explores the Grand Line in search of the world's ultimate treasure known as "One Piece" in order to become the next Pirate King.
-
A worldwide phenomenon with millions of fans and merchandise
-
One Piece has received praise for its storytelling, world-building, art, characterization, and humor. It has received many awards and is ranked by critics, reviewers, and readers as one of the best manga of all time. As of August 2022, it had over 516.6 million copies in circulation in 61 countries and regions worldwide, making it the best-selling manga series in history, and the best-selling comic series printed in book volume format. One Piece has also been adapted into an anime series, several movies, video games, and merchandise. The anime series has been broadcast in more than 80 countries and regions worldwide, and has amassed a huge fan base.
-
How to download Higgs Domino Tema One Piece?
-
The benefits of using a modded version of the game with One Piece theme
-
If you love both Higgs Domino and One Piece, you might want to try Higgs Domino Tema One Piece, a modded version of the game that features the characters and settings from One Piece. By using this modded version, you can enjoy the following benefits:
-
-
You can play with your favorite One Piece characters, such as Luffy, Zoro, Nami, Sanji, Chopper, and more. Each character has their own unique skills and abilities that can help you win the game.
-
You can explore the different locations from One Piece, such as the East Blue, the Grand Line, the New World, and more. Each location has its own background music and sound effects that match the theme of the game.
-
You can customize your game with various One Piece items, such as hats, costumes, weapons, and more. You can also collect coins and rewards that are based on One Piece items, such as Devil Fruits, treasure chests, and more.
-
You can experience a more immersive and fun gameplay with the One Piece theme. You can enjoy the graphics, animations, and effects that are inspired by the manga and anime series. You can also interact with other players who share your passion for One Piece.
-
-
The steps to download and install the modded APK file
-
To download and install Higgs Domino Tema One Piece, you need to follow these steps:
-
-
Go to this link: [Higgs Domino Tema One Piece] and click on the download button. This will start downloading the modded APK file to your device.
-
Once the download is complete, go to your device's settings and enable the installation of apps from unknown sources. This will allow you to install the modded APK file without any problems.
-
Locate the downloaded modded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen to complete the installation.
-
After the installation is done, you can launch the game and enjoy playing Higgs Domino Tema One Piece. You can also update the game regularly to get new features and improvements.
-
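For readers who prefer a scripted install, below is a minimal Python sketch of the same idea using ADB instead of tapping through the file manager. It assumes `adb` is installed and on your PATH, USB debugging is enabled on the device, and the APK file name is only a placeholder for whatever you actually downloaded.

```python
# Minimal sketch: installing a downloaded APK over ADB.
# The file name is a placeholder; point it at the APK you downloaded.
import subprocess
from pathlib import Path

APK_PATH = Path("higgs-domino-tema-one-piece.apk")  # hypothetical file name

def adb_install(apk: Path) -> bool:
    """Install (or reinstall, via -r) the APK on the first connected device."""
    result = subprocess.run(
        ["adb", "install", "-r", str(apk)],
        capture_output=True,
        text=True,
    )
    print(result.stdout.strip() or result.stderr.strip())
    return "Success" in result.stdout

if __name__ == "__main__":
    if APK_PATH.exists():
        adb_install(APK_PATH)
    else:
        print(f"APK not found: {APK_PATH}")
```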
Conclusion
-
Higgs Domino Tema One Piece is a modded version of the popular online domino game that features the characters and settings from the famous manga and anime series. By downloading and installing this modded version, you can enjoy playing domino games with a unique One Piece theme. You can also customize your game with various One Piece items, explore the different locations from One Piece, and interact with other players who share your passion for One Piece. If you are a fan of both Higgs Domino and One Piece, you should definitely try this modded version and have fun.
-
FAQs
-
Q1: Is Higgs Domino Tema One Piece safe to use?
-
A1: Higgs Domino Tema One Piece is safe to use as long as you download it from a trusted source. However, you should always be careful when installing apps from unknown sources, as they might contain viruses or malware that can harm your device. You should also check the permissions that the app requests and make sure they are necessary for the app's functionality.
-
Q2: How to update Higgs Domino Tema One Piece?
-
A2: To update Higgs Domino Tema One Piece, you need to download the latest version of the modded APK file from the same source that you downloaded it from before. Then, you need to uninstall the previous version of the app and install the new version. Alternatively, you can check if the app has an in-built update feature that allows you to update it without uninstalling it.
-
-
Q3: How to get more coins and rewards in Higgs Domino Tema One Piece?
-
A3: To get more coins and rewards in Higgs Domino Tema One Piece, you can do the following:
-
-
Play more games and win more matches. The more you play and win, the more coins and rewards you will earn.
-
Complete daily tasks and achievements. The app will give you some tasks and achievements that you can complete to earn extra coins and rewards.
-
Invite your friends and other players to join the game. The app will reward you with some coins and rewards for every friend or player that you invite to play the game with you.
-
Watch ads and videos. The app will sometimes offer you some ads and videos that you can watch to earn some coins and rewards. However, this option might not be available in some regions or countries.
-
-
Q4: How to play with friends or other players in Higgs Domino Tema One Piece?
-
A4: To play with friends or other players in Higgs Domino Tema One Piece, you can do the following:
-
-
Create or join a room. The app will allow you to create or join a room where you can play with your friends or other players. You can choose the game mode, the table size, and the bet amount that you want to play with. You can also chat with your friends or other players in the room.
-
Use the social features. The app will provide you with some social features that let you interact with your friends or other players. You can add them as friends, send them messages, gifts, emoticons, and more. You can also join clubs or groups where you can meet new people and play together.
-
-
Q5: What are some other themes or mods for Higgs Domino?
-
A5: Higgs Domino has many other themes or mods that you can choose from. Some of them are:
-
-
Higgs Domino Tema Naruto: A modded version of the game that features the characters and settings from Naruto, another popular manga and anime series.
-
Higgs Domino Tema Doraemon: A modded version of the game that features the characters and settings from Doraemon, a classic Japanese manga and anime series.
-
Higgs Domino Tema PUBG: A modded version of the game that features the characters and settings from PUBG, a popular online battle royale game.
-
Higgs Domino Tema Free Fire: A modded version of the game that features the characters and settings from Free Fire, another popular online battle royale game.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Super Bino Go Mod APK 2022 Experience the New and Improved Classic Adventure with Hack Features.md b/spaces/congsaPfin/Manga-OCR/logs/Super Bino Go Mod APK 2022 Experience the New and Improved Classic Adventure with Hack Features.md
deleted file mode 100644
index e6e03bf69be92873d4f63a92972d598ec49f6194..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Super Bino Go Mod APK 2022 Experience the New and Improved Classic Adventure with Hack Features.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
Super Bino Go Hack Mod APK Download 2022: A Guide for Adventure Lovers
-
If you are a fan of classic platformer games, you will love Super Bino Go. This game is an exciting and highly popular free adventure game for smartphones. With its engaging gameplay, it’s a must-try game in 2023. In this article, we will provide you with valuable information about the game’s features, how to download it, and smart gameplay strategies for Super Bino Go.
-
What is Super Bino Go?
-
Super Bino Go is a game that follows the adventures of Bino, a brave hero who is on a mission to save his princess from the evil monsters. The game is inspired by the classic Mario games, but with its own unique twists and challenges. You will have to run, jump, smash, and fight your way through various worlds and levels, collecting coins, power-ups, and stars along the way.
Super Bino Go has many features that make it an enjoyable and addictive game for all ages. Some of these features are:
-
-
Beautiful graphics and sound effects that create a lively and colorful atmosphere.
-
Easy and intuitive controls that allow you to move and jump with just one finger.
-
Over 200 challenging levels that will test your skills and reflexes.
-
7 different worlds with different themes and enemies, such as desert, forest, ice, volcano, etc.
-
A variety of power-ups that can help you overcome obstacles and defeat enemies, such as mushrooms, fireballs, helmets, etc.
-
Achievements and leaderboards that let you compete with your friends and other players around the world.
-
-
How to play Super Bino Go
-
The gameplay of Super Bino Go is simple and fun. You just need to tap the screen to make Bino run and jump. You can also swipe left or right to change direction. Your goal is to reach the end of each level without falling into traps or getting hit by enemies. Along the way, you can collect coins, power-ups, and stars that can help you unlock new worlds and levels. You can also use the fireballs to shoot at enemies or break bricks.
-
Why download Super Bino Go hack mod apk?
-
While Super Bino Go is a free game, it also has some in-app purchases that can enhance your gaming experience. For example, you can buy more coins, lives, or power-ups with real money. However, if you don’t want to spend any money on the game, you can download Super Bino Go hack mod apk instead.
-
Benefits of Super Bino Go hack mod apk
-
Super Bino Go hack mod apk is a modified version of the original game that gives you some advantages and benefits. Some of these benefits are:
-
-
Unlimited coins that you can use to buy anything in the game.
-
Unlimited lives that let you play as long as you want without worrying about dying.
-
All worlds and levels unlocked so you can explore them at your own pace.
-
No ads that can interrupt your gameplay or annoy you.
-
-
How to download and install Super Bino Go hack mod apk
-
To download and install Super Bino Go hack mod apk on your Android device, you need to follow these steps:
-
-
Go to [this link] and click on the download button to get the apk file.
-
Allow unknown sources in your device settings to install apps from outside the Google Play Store.
-
Locate the downloaded apk file in your file manager and tap on it to start the installation process.
-
Follow the instructions on the screen and wait for the installation to finish.
-
Launch the game and enjoy playing Super Bino Go hack mod apk with unlimited coins, lives, and more.
-
-
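Because modded APKs come from third-party sites, it is worth verifying the file before installing it. The sketch below shows one way to do that in Python by comparing the file's SHA-256 hash against a checksum published by the site you downloaded from; the file name and the expected hash are placeholders, not real values.

```python
# Sketch: verifying a downloaded APK against a published SHA-256 checksum.
# Both the file name and EXPECTED_SHA256 are placeholders.
import hashlib
from pathlib import Path

APK_PATH = Path("super-bino-go-mod.apk")                       # hypothetical file name
EXPECTED_SHA256 = "<checksum published by the download site>"  # placeholder

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so large APKs never sit fully in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print("Checksum OK" if actual == EXPECTED_SHA256 else f"Mismatch: {actual}")
```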
Tips and tricks for Super Bino Go
-
Super Bino Go is a game that requires skill, strategy, and patience. To help you master the game and have more fun, here are some tips and tricks that you can use:
-
-
Collect coins and power-ups
-
Coins and power-ups are essential items that can help you progress in the game. Coins can be used to buy more lives, power-ups, or unlock new worlds and levels. Power-ups can give you special abilities that can make you stronger, faster, or invincible. Some of the power-ups that you can find in the game are:
-
-
Mushrooms: These can make you bigger and allow you to break bricks with your head.
-
Fireballs: These can let you shoot fireballs at enemies or bricks.
-
Helmets: These can protect you from one hit by an enemy or an obstacle.
-
Stars: These can make you invincible for a short time and let you run through enemies and obstacles.
-
-
You can find coins and power-ups by breaking bricks, hitting question blocks, or exploring hidden areas. Try to collect as many as you can to boost your score and performance.
-
Avoid enemies and obstacles
-
Enemies and obstacles are the main threats that can hinder your progress in the game. Enemies can hurt you or kill you if you touch them, while obstacles can trap you or make you fall. Some of the enemies and obstacles that you will encounter in the game are:
-
-
Turtles: These are slow-moving enemies that can be defeated by jumping on them or shooting them with fireballs.
-
Spiders: These are fast-moving enemies that can jump and chase you. You can defeat them by jumping on them or shooting them with fireballs.
-
Cacti: These are stationary obstacles that can hurt you if you touch them. You cannot destroy them, so you have to avoid them or jump over them.
-
Lava: This is a deadly obstacle that can kill you instantly if you fall into it. You have to be careful when crossing bridges or platforms over lava.
-
-
You can avoid enemies and obstacles by jumping over them, running away from them, or using power-ups to destroy them. You can also use fireballs to clear your path or create shortcuts. Be alert and watch out for any signs of danger.
-
Explore different worlds and levels
-
Super Bino Go has 7 different worlds with different themes and enemies. Each world has 30 levels that vary in difficulty and complexity. You will have to complete each level by reaching the flag at the end. You will also have to collect 3 stars in each level to unlock new worlds and levels.
-
You can explore different worlds and levels by using the map screen. You can also replay any level that you have completed to improve your score or find any missing stars. You can also discover secret areas or hidden levels by looking for clues or hints in the environment. Try to explore every corner of each world and level to enjoy the game fully.
-
Conclusion
-
Super Bino Go is a game that will bring back your childhood memories of playing classic platformer games. It is a game that will challenge your skills and entertain you with its colorful graphics and sound effects. It is a game that will make you feel like a hero as you save your princess from the evil monsters.
-
If you want to experience Super Bino Go with more fun and excitement, you should download Super Bino Go hack mod apk. This will give you unlimited coins, lives, and access to all worlds and levels. You will also be able to play without any ads or interruptions.
-
To download Super Bino Go hack mod apk, just follow the steps that we have provided above. Then, launch the game and enjoy playing Super Bino Go hack mod apk with unlimited coins, lives, and more.
-
FAQs
-
Here are some frequently asked questions about Super Bino Go hack mod apk:
-
-
Is Super Bino Go hack mod apk safe to download and install?
-
Yes, Super Bino Go hack mod apk is safe to download and install on your Android device. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source and scan it with an antivirus app before installing it.
-
Will Super Bino Go hack mod apk work on any Android device?
-
Super Bino Go hack mod apk is compatible with most Android devices that have Android 4.1 or higher. However, some devices may not support the game or the mod due to different specifications or settings. If you encounter any problems or errors while playing the game or the mod, you can try to update your device, clear your cache, or reinstall the game or the mod.
-
Can I play Super Bino Go hack mod apk online or offline?
-
Super Bino Go hack mod apk can be played both online and offline. You can play online to access the leaderboards and achievements, or to compete with your friends and other players. You can also play offline if you don't have an internet connection or if you want to save your data. However, some features or functions may not work properly when you play offline, such as saving your progress or syncing your data.
-
Can I update Super Bino Go hack mod apk to the latest version?
-
Yes, you can update Super Bino Go hack mod apk to the latest version whenever there is a new update available. However, you should always backup your data before updating, as some updates may cause compatibility issues or data loss. You should also check the source of the update and make sure it is reliable and safe.
-
Can I use Super Bino Go hack mod apk with other mods or cheats?
-
No, you should not use Super Bino Go hack mod apk with other mods or cheats, as this may cause conflicts or errors in the game. Super Bino Go hack mod apk already provides you with enough benefits and advantages that you don't need any other mods or cheats. Using other mods or cheats may also result in bans or penalties from the game developers or authorities.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Download Stumble Guys 40 APK for Android Devices.md b/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Download Stumble Guys 40 APK for Android Devices.md
deleted file mode 100644
index f39759b9a57273e579e16899e08bf7788005891f..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/The Best Way to Download Stumble Guys 40 APK for Android Devices.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-
Stumble Guys 40 Download APK: How to Play the Ultimate Knockout Game on Your Android Device
-
Do you love playing games that are full of fun, action, and chaos? Do you want to compete with other players in a hilarious and thrilling race to the finish line? If you answered yes, then you should try Stumble Guys, a fast-paced knockout game that will make you laugh and scream at the same time.
Stumble Guys is a multiplayer game that is inspired by popular TV shows like Wipeout and Takeshi's Castle. In this game, you have to run, dash, slide, and dodge past various obstacles and opponents to reach the final round. The game can support up to 32 players in each match, so you can invite your friends or play with random people online.
-
A fun and chaotic multiplayer game
-
Stumble Guys is a game that is designed to make you have fun and enjoy the chaos. The game has colorful graphics, funny sound effects, and hilarious animations that will keep you entertained. You can also chat with other players and use emojis to express your emotions. The game is easy to play, but hard to master. You have to be quick, agile, and smart to avoid falling or getting eliminated.
-
A variety of levels and modes
-
Stumble Guys has a lot of levels and modes that will challenge your skills and creativity. You can play in different environments, such as ice, desert, forest, and more. You can also choose from different game modes, such as solo, team, or custom. Each level and mode has its own rules and objectives, so you have to adapt your strategy accordingly.
-
A customizable character
-
Stumble Guys allows you to customize your character with different costumes and accessories. You can choose from various outfits, such as pirate, ninja, astronaut, clown, and more. You can also mix and match different items to create your own unique look. You can unlock more costumes and accessories by playing the game or using in-game currency.
-
How to download and install Stumble Guys 40 APK?
-
If you want to play Stumble Guys on your Android device, you have to download and install the APK file. The APK file is a modified version of the original game that has some extra features and benefits. Here are the steps to download and install Stumble Guys 40 APK:
-
-
Step 1: Enable unknown sources
-
Before you can install the APK file, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Step 2: Download the APK file
-
Next, you have to download the APK file from a reliable source. You can use this link to download Stumble Guys 40 APK for free. Make sure you have enough storage space on your device before downloading the file.
-
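If you want to automate the "enough storage space" check and the download itself, a rough Python sketch is shown below. The URL is a placeholder (use whatever source you actually trust), and it relies on the third-party `requests` package.

```python
# Sketch: check free disk space, then stream-download the APK so it never
# has to fit in memory. The URL is a placeholder, not a real download link.
import shutil
import requests  # third-party: pip install requests

APK_URL = "https://example.com/stumble-guys-40.apk"  # placeholder URL
OUT_FILE = "stumble-guys-40.apk"
MIN_FREE_BYTES = 200 * 1024 * 1024  # require roughly 200 MB of free space

def download(url: str, out_file: str) -> None:
    free = shutil.disk_usage(".").free
    if free < MIN_FREE_BYTES:
        raise RuntimeError(f"Only {free // (1024 * 1024)} MB free - clear some space first")
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(out_file, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=1 << 16):
                fh.write(chunk)

if __name__ == "__main__":
    download(APK_URL, OUT_FILE)
```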
Step 3: Install the APK file
-
After downloading the APK file, locate it in your device's file manager and tap on it. You will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation process to finish.
-
Step 4: Launch the game and enjoy
-
Once the installation is done, you can launch the game and enjoy. You will see the Stumble Guys icon on your home screen or app drawer. Tap on it and start playing the ultimate knockout game on your Android device.
-
What are the features of Stumble Guys 40 APK?
-
Stumble Guys 40 APK is the latest version of the game that has some new and improved features. Here are some of the features that you can enjoy with this APK:
-
New levels and costumes
-
Stumble Guys 40 APK has added more levels and costumes to the game. You can now play in new maps, such as the castle, the farm, and the factory. You can also unlock more costumes, such as the knight, the farmer, and the robot. These new levels and costumes will make your gameplay more fun and diverse.
-
Improved performance and stability
-
Stumble Guys 40 APK has also improved the performance and stability of the game. The game now runs smoother and faster on your device, with less lag and glitches. The game also has better compatibility with different devices and Android versions. You can enjoy a seamless and flawless gaming experience with this APK.
-
Bug fixes and enhancements
-
Stumble Guys 40 APK has also fixed some bugs and issues that were present in the previous versions of the game. The game now has less crashes, errors, and freezes. The game also has some enhancements, such as better graphics, sound quality, and user interface. You can play the game with more confidence and satisfaction with this APK.
-
Conclusion
-
Stumble Guys is a fun and chaotic multiplayer game that will make you laugh and scream at the same time. You can play with up to 32 players in each match, compete in different levels and modes, and customize your character with different costumes and accessories. You can download and install Stumble Guys 40 APK on your Android device to enjoy the latest features and benefits of the game. Stumble Guys 40 APK is free, safe, and easy to use. Download it now and join the ultimate knockout game.
-
FAQs
-
Here are some frequently asked questions about Stumble Guys 40 APK:
-
-
| Question | Answer |
| --- | --- |
| Is Stumble Guys 40 APK legal? | Yes, Stumble Guys 40 APK is legal. It is a modified version of the original game that does not violate any laws or regulations. However, you should use it at your own risk, as it may not be supported by the official developers or publishers. |
| Is Stumble Guys 40 APK safe? | Yes, Stumble Guys 40 APK is safe. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should download it from a trusted source, such as this link, to avoid any fake or malicious files. |
| Is Stumble Guys 40 APK compatible with my device? | Stumble Guys 40 APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not be able to run the game smoothly or properly due to different specifications or settings. You should check your device's compatibility before downloading and installing the APK. |
| How can I update Stumble Guys 40 APK? | You can update Stumble Guys 40 APK by downloading and installing the latest version of the file from this link. You should always update your APK to enjoy the newest features and improvements of the game. |
| How can I uninstall Stumble Guys 40 APK? | You can uninstall Stumble Guys 40 APK by following these steps: Go to Settings > Apps > Stumble Guys > Uninstall. Tap on OK to confirm your action. You can also delete the APK file from your device's file manager. |
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/TikTok APK for Windows How to Watch and Create Amazing Videos.md b/spaces/congsaPfin/Manga-OCR/logs/TikTok APK for Windows How to Watch and Create Amazing Videos.md
deleted file mode 100644
index 17c990e71de5be06f9c944e1a76659b28d438e06..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/TikTok APK for Windows How to Watch and Create Amazing Videos.md
+++ /dev/null
@@ -1,152 +0,0 @@
-
-
TikTok Apk Windows: How to Download and Use the App on Your PC
-
TikTok is one of the most popular social media apps in the world, with over 1 billion monthly active users. It allows you to create and share short videos with music, filters, effects, and more. You can also watch millions of videos from other users, covering various topics like comedy, gaming, beauty, education, and more.
But what if you want to use TikTok on your Windows PC? Maybe you want to enjoy a bigger screen, better sound quality, or more editing options. Or maybe you don't have a compatible smartphone or tablet. Whatever the reason, you can install and use TikTok on your Windows PC with some simple steps.
-
In this article, we will show you three methods to install TikTok on Windows, and how to use its basic features and tips. Let's get started!
-
How to Install TikTok on Windows: Three Methods
-
There are three main ways to install TikTok on your Windows PC: using an Android emulator, using the TikTok app from the Microsoft Store, or using the TikTok website on a browser. Here are the pros and cons of each method:
-
| Method | Pros | Cons |
| --- | --- | --- |
| Android emulator | Gives you access to the full features of the mobile app; allows you to use other Android apps on your PC; supports keyboard and mouse input | Requires downloading and installing additional software; may slow down your PC performance; may not be compatible with some PC models |
| TikTok app from Microsoft Store | Easy to download and install; officially supported by TikTok; optimized for Windows devices | Has limited features compared to the mobile app; may not be updated frequently; may have bugs or glitches |
| TikTok website on a browser | No need to download or install anything; works on any browser and device; has a simple and user-friendly interface | Has very limited features compared to the mobile app; does not allow you to create or edit videos; does not support keyboard shortcuts or mouse scrolling |
-
Depending on your preferences and needs, you can choose any of these methods to install TikTok on your Windows PC. Here are the detailed steps for each method:
-
Method 1: Using an Android Emulator
-
An Android emulator is a software that simulates an Android device on your PC. It allows you to run Android apps and games on your PC as if you were using a smartphone or tablet. There are many Android emulators available online, but some of the most popular ones are BlueStacks, NoxPlayer, and LDPlayer.
-
-
To use an Android emulator to install TikTok on your Windows PC, follow these steps:
-
-
Download and install an Android emulator of your choice from its official website.
-
Launch the emulator and sign in with your Google account.
-
Open the Google Play Store app inside the emulator and search for TikTok.
-
Tap Install and wait for the app to download.
-
Tap Open or find the TikTok icon on the emulator's home screen.
-
Sign in or sign up with your TikTok account.
-
Enjoy using TikTok on your Windows PC!
-
-
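As an optional extra, most emulators also expose an ADB endpoint, so the app can be launched from the Windows side with a short script. Everything below is an assumption you need to check for your setup: the ADB address depends on the emulator, and `com.zhiliaoapp.musically` is only the commonly reported package name for TikTok.

```python
# Hypothetical sketch: launching TikTok inside an Android emulator via ADB.
# The emulator address and the package name are assumptions - verify both.
import subprocess

EMULATOR_ADDR = "127.0.0.1:5555"             # assumed; differs per emulator
TIKTOK_PACKAGE = "com.zhiliaoapp.musically"  # assumed package name

def launch_tiktok() -> None:
    subprocess.run(["adb", "connect", EMULATOR_ADDR], check=False)
    # "monkey" with a single event simply fires the app's launcher intent.
    subprocess.run(
        ["adb", "-s", EMULATOR_ADDR, "shell", "monkey",
         "-p", TIKTOK_PACKAGE, "-c", "android.intent.category.LAUNCHER", "1"],
        check=False,
    )

if __name__ == "__main__":
    launch_tiktok()
```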
Method 2: Using the TikTok App from the Microsoft Store
-
The Microsoft Store is a digital distribution platform that allows you to download and install apps and games for your Windows devices. It has an official TikTok app that you can use on your Windows PC. However, this app has fewer features than the mobile app: it does not let you create and edit videos, use filters and effects, or access some settings and features.
-
To use the TikTok app from the Microsoft Store to install TikTok on your Windows PC, follow these steps:
-
Open the Microsoft Store app on your Windows PC or visit its website.
-
Search for TikTok in the search bar and click on the app.
-
Click on Get or Install and wait for the app to download.
-
Click on Launch or find the TikTok icon on your Start menu or desktop.
-
Sign in or sign up with your TikTok account.
-
Enjoy using TikTok on your Windows PC!
-
-
Method 3: Using the TikTok Website on a Browser
-
The TikTok website is a web-based version of the app that you can access on any browser and device. It allows you to watch videos from other users, explore different categories and hashtags, and sign in or sign up with your TikTok account. However, it does not allow you to create or edit videos, use filters and effects, or access some settings and features.
- To use TikTok through its website on your Windows PC (nothing needs to be downloaded or installed for this method), follow these steps:
-
Open any browser on your Windows PC and visit https://www.tiktok.com/.
-
Sign in or sign up with your TikTok account.
-
Enjoy watching TikTok videos on your Windows PC!
-
-
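Since this method is nothing more than opening a web page, it can even be scripted; a minimal Python sketch using only the standard library:

```python
# Open the TikTok website in the system's default browser.
import webbrowser

webbrowser.open("https://www.tiktok.com/")
```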
How to Use TikTok on Windows: Basic Features and Tips
-
Now that you have installed TikTok on your Windows PC, you may wonder how to use its basic features and tips. Here are some of the things you can do with TikTok on Windows:
-
How to Create and Edit Videos
-
If you want to create and edit videos on TikTok, you will need to use an Android emulator or the mobile app on your smartphone or tablet. The TikTok app from the Microsoft Store and the TikTok website do not support this feature.
-
To create and edit videos on TikTok, follow these steps:
-
-
Open the TikTok app on your Android emulator or mobile device.
-
Tap on the plus (+) icon at the bottom of the screen.
-
Choose whether to record a new video or upload an existing one from your gallery.
-
Use the icons on the right side of the screen to adjust the speed, timer, beauty mode, filters, effects, and sound of your video.
-
Tap on the red circle to start and stop recording. You can record up to 60 seconds of video in one or multiple clips.
-
Tap on Next when you are done recording or uploading.
-
Edit your video by trimming, cropping, adding stickers, text, voiceovers, transitions, and more.
-
Tap on Next when you are done editing.
-
Add a caption, hashtags, and tags to your video. You can also choose who can view, comment, duet, react, stitch, and download your video.
-
Tap on Post to share your video with your followers and the world!
-
-
How to Explore and Watch Videos
-
If you want to explore and watch videos on TikTok, you can use any of the three methods we mentioned above: using an Android emulator, using the TikTok app from the Microsoft Store, or using the TikTok website on a browser. However, each method has some differences in how you can navigate and watch videos.
-
To explore and watch videos on TikTok using an Android emulator, follow these steps:
-
-
Open the TikTok app on your Android emulator.
-
Swipe left or right on the home screen to switch between the For You page and the Following page. The For You page shows you videos recommended for you based on your preferences and behavior. The Following page shows you videos from users you follow.
-
Tap on any video to watch it in full screen. You can also swipe up or down to watch more videos.
-
Tap on any icon or text on the video screen to interact with it. You can like, comment, share, follow, duet, react, stitch, download, report, or add to favorites any video. You can also tap on the profile picture of the user who posted the video to visit their profile page.
-
Tap on the magnifying glass icon at the bottom of the screen to open the Discover page. Here you can search for users, videos, sounds, hashtags, categories, and more. You can also see the trending videos, sounds, and hashtags on this page.
-
-
To explore and watch videos on TikTok using the TikTok app from the Microsoft Store, follow these steps:
-
-
Open the TikTok app on your Windows PC.
-
Click on the For You tab or the Following tab at the top of the screen to switch between the two pages. The For You tab shows you videos recommended for you based on your preferences and behavior. The Following tab shows you videos from users you follow.
-
Click on any video to watch it in full screen. You can also use the arrow keys or the mouse wheel to watch more videos.
-
Click on any icon or text on the video screen to interact with it. You can like, comment, share, follow, duet, react, stitch, download, report, or add to favorites any video. You can also click on the profile picture of the user who posted the video to visit their profile page.
-
Click on the Discover tab at the top of the screen to open the Discover page. Here you can search for users, videos, sounds, hashtags, categories, and more. You can also see the trending videos, sounds, and hashtags on this page.
-
-
To explore and watch videos on TikTok using the TikTok website on a browser, follow these steps:
-
-
Open any browser on your Windows PC and visit https://www.tiktok.com/.
-
Click on the Watch Now button at the top right corner of the screen to open the For You page. Here you can see videos recommended for you based on your preferences and behavior.
-
Click on any video to watch it in full screen. You can also use the arrow keys or the mouse wheel to watch more videos.
-
Click on any icon or text on the video screen to interact with it. You can like, comment, share, follow, report, or add to favorites any video. You can also click on the profile picture of the user who posted the video to visit their profile page.
-
Click on the Discover button at the top left corner of the screen to open the Discover page. Here you can search for users, videos, sounds, hashtags, categories, and more. You can also see the trending videos, sounds, and hashtags on this page.
-
-
Conclusion: Summary and Recommendations
-
In this article, we have shown you how to install and use TikTok on your Windows PC using three methods: using an Android emulator, using the TikTok app from the Microsoft Store, or using the TikTok website on a browser. Each method has its own advantages and disadvantages, so you can choose the one that suits you best.
-
We recommend using an Android emulator if you want to enjoy the full features of the mobile app, such as creating and editing videos, using filters and effects, and accessing some settings and features. However, this method may require downloading and installing additional software, which may slow down your PC performance or cause compatibility issues.
-
We recommend using the TikTok app from the Microsoft Store if you want an easy and official way to install TikTok on your Windows PC. However, this app has limited features compared to the mobile app, such as the ability to create and edit videos, use filters and effects, and access some settings and features. This app may also not be updated frequently or have bugs or glitches. We recommend using the TikTok website on a browser if you want a simple and convenient way to watch TikTok videos on your Windows PC. However, this method has very limited features compared to the mobile app, such as the inability to create or edit videos, use filters and effects, or access some settings and features. This method also does not support keyboard shortcuts or mouse scrolling. We hope this article has helped you install and use TikTok on your Windows PC. If you have any questions or feedback, please let us know in the comments below. Happy TikToking!
FAQs: Five Common Questions and Answers
-
Here are some of the most frequently asked questions and answers about TikTok on Windows:
-
Q: Can I use TikTok on Windows 10?
-
A: Yes, you can use TikTok on Windows 10 using any of the three methods we mentioned above: using an Android emulator, using the TikTok app from the Microsoft Store, or using the TikTok website on a browser.
-
Q: Is TikTok safe to use on Windows?
-
A: TikTok is generally safe to use on Windows, as long as you download and install it from trusted sources, such as the official websites of the Android emulators or the Microsoft Store. You should also be careful about what you share and who you interact with on TikTok, as there may be some malicious or inappropriate content or users on the platform.
-
Q: How can I update TikTok on Windows?
-
A: To update TikTok on Windows, you need to follow different steps depending on the method you are using. If you are using an Android emulator, you need to open the Google Play Store app inside the emulator and check for updates. If you are using the TikTok app from the Microsoft Store, you need to open the Microsoft Store app on your Windows PC and check for updates. If you are using the TikTok website on a browser, you don't need to update anything, as the website will automatically reflect the latest version of the app.
-
Q: How can I delete TikTok on Windows?
-
A: To delete TikTok on Windows, you need to follow different steps depending on the method you are using. If you are using an Android emulator, you need to uninstall the TikTok app from the emulator's settings or home screen. If you are using the TikTok app from the Microsoft Store, you need to uninstall the app from your Windows PC's settings or start menu. If you are using the TikTok website on a browser, you don't need to delete anything, as there is no installation involved.
-
Q: How can I contact TikTok support on Windows?
-
A: To contact TikTok support on Windows, you can visit their official help center at https://support.tiktok.com/en/. Here you can find answers to common questions, report a problem, give feedback, or request a feature. You can also email them at feedback@tiktok.com or call them at 1-888-867-8111.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Adomania 1 CD audio classe A1 book pdf - A comprehensive and interactive course for beginners.md b/spaces/contluForse/HuggingGPT/assets/Adomania 1 CD audio classe A1 book pdf - A comprehensive and interactive course for beginners.md
deleted file mode 100644
index d23ed17bd63b10d340daf915191b50c8734fad19..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Adomania 1 CD audio classe A1 book pdf - A comprehensive and interactive course for beginners.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Facebook Privacy . animal jam password, animal jam password online, animal jam facebook, animal jam password reset, animal jam password, animal jam password cracker, animal jam facebook free, animal jam password hack, animal jam hack, animal jam, animal jam hack, animal jam cracker, animal jam cracker 0.5.0, animal jam password cracker download free, animal jam 2018 crack, animal jam password crack. animal jam password cracker 2019, animal jam password generator, animal jam hack crack, animal jam password generator.
-
-App Animal Jam Password All cracks in one
-
-Animal Jam Password All crack in one
-
-App Animal Jam Password All crack in one
-
-Animal Jam Passwort Download
-
-Animal Jam Passwort Download in one
-
-Animal Jam Password All 2019
-
-Animal Jam Passwort Downloadin one
-
-Animal Jam Password All For Windows
-
-Animal Jam Passwort Download for Mac
-
-Animal Jam Password All 2019 For Mac
-
-Animal Jam Password All For Mac
-
-Animal Jam Passwort Download 2019 For Mac
-
-Animal Jam Password All 2019 Free Download
-
-Animal Jam Password All For Mac Free Download
-
-Animal Jam Password All For Mac Free 4fefd39f24
-
-
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Daniel Reference Bible Malayalam Free Download Why You Should Get It.md b/spaces/contluForse/HuggingGPT/assets/Daniel Reference Bible Malayalam Free Download Why You Should Get It.md
deleted file mode 100644
index f873fa5cdb6a44547f85b6de1c70b2f9c0427c55..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Daniel Reference Bible Malayalam Free Download Why You Should Get It.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
This material is the result of years of study, reflection, research, organizing and writing. Its objective is to present our precious message in a profound yet simple manner gleaning the gems of truth from Scripture and the writings of the Spirit of Prophecy. In order to continue offering this free digital download, we need your financial support. Please consider donating so we can continue to reach as many people as possible around the world for God's kingdom. If you prefer to have a hard copy shipped to you, click here.
-
Yes! We want to help those teaching the Bible in all nations of the world in this visual age. All pictures are free for use in teaching and non-commercial streaming. We provide the pictures and you tell the story, with the Bible as your reference source. Conditions about the reuse of the images in new projects vary with each contributor. Those who donate to this project help share these resources around the world as a gift.
All 66 books of the Bible have been covered by John Schultz: An accomplishment of a life time, matched by only a few saints in history. Make your choice below and download the PDF Commentary eBook for free.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cooelf/Multimodal-CoT/timm/data/__init__.py b/spaces/cooelf/Multimodal-CoT/timm/data/__init__.py
deleted file mode 100644
index 7d3cb2b4d7e823aabb1d55781149579eeb94b024..0000000000000000000000000000000000000000
--- a/spaces/cooelf/Multimodal-CoT/timm/data/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\
- rand_augment_transform, auto_augment_transform
-from .config import resolve_data_config
-from .constants import *
-from .dataset import ImageDataset, IterableImageDataset, AugMixDataset
-from .dataset_factory import create_dataset
-from .loader import create_loader
-from .mixup import Mixup, FastCollateMixup
-from .parsers import create_parser
-from .real_labels import RealLabelsImagenet
-from .transforms import *
-from .transforms_factory import create_transform
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/evaluation/class_names.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/evaluation/class_names.py
deleted file mode 100644
index 532c5fd78946ede66d747ec8e7b72dbb66471aac..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/evaluation/class_names.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import annotator.mmpkg.mmcv as mmcv
-
-
-def cityscapes_classes():
- """Cityscapes class names for external use."""
- return [
- 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
- 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
- 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
- 'bicycle'
- ]
-
-
-def ade_classes():
- """ADE20K class names for external use."""
- return [
- 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
- 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
- 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
- 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
- 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
- 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
- 'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
- 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
- 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
- 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
- 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
- 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
- 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
- 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
- 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
- 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
- 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
- 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
- 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
- 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
- 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
- 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
- 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
- 'clock', 'flag'
- ]
-
-
-def voc_classes():
- """Pascal VOC class names for external use."""
- return [
- 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
- 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
- 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
- 'tvmonitor'
- ]
-
-
-def cityscapes_palette():
- """Cityscapes palette for external use."""
- return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
- [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
- [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
- [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
- [0, 0, 230], [119, 11, 32]]
-
-
-def ade_palette():
- """ADE20K palette for external use."""
- return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
- [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
- [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
- [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
- [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
- [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
- [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
- [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
- [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
- [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
- [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
- [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
- [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
- [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
- [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
- [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
- [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
- [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
- [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
- [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
- [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
- [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
- [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
- [102, 255, 0], [92, 0, 255]]
-
-
-def voc_palette():
- """Pascal VOC palette for external use."""
- return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
- [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
- [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
- [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
- [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
-
-
-dataset_aliases = {
- 'cityscapes': ['cityscapes'],
- 'ade': ['ade', 'ade20k'],
- 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
-}
-
-
-def get_classes(dataset):
- """Get class names of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_classes()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must a str, but got {type(dataset)}')
- return labels
-
-
-def get_palette(dataset):
- """Get class palette (RGB) of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_palette()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must be a str, but got {type(dataset)}')
- return labels
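
The helpers above (`get_classes`/`get_palette`) resolve a dataset alias to its class names and RGB palette. A minimal sketch of how such a palette can be applied to an integer label map; `label_map` is a made-up array and the shapes are only illustrative:

```python
import numpy as np

# Hypothetical label map (H x W of class indices); any segmentation output would do.
label_map = np.random.randint(0, 21, size=(4, 4))
palette = np.array(voc_palette(), dtype=np.uint8)   # (21, 3) RGB rows, one per class
color_seg = palette[label_map]                       # fancy indexing -> (H, W, 3) color image
print(color_seg.shape)                               # (4, 4, 3)
```
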
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/cocoeval.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/cocoeval.py
deleted file mode 100644
index 89c251e1652a0cfc7e8ff1bbb1024a801ed2ebe7..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/cocoeval.py
+++ /dev/null
@@ -1,534 +0,0 @@
-__author__ = 'tsungyi'
-
-import numpy as np
-import datetime
-import time
-from collections import defaultdict
-from . import mask as maskUtils
-import copy
-
-class COCOeval:
- # Interface for evaluating detection on the Microsoft COCO dataset.
- #
- # The usage for CocoEval is as follows:
- # cocoGt=..., cocoDt=... # load dataset and results
- # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
- # E.params.recThrs = ...; # set parameters as desired
- # E.evaluate(); # run per image evaluation
- # E.accumulate(); # accumulate per image results
- # E.summarize(); # display summary metrics of results
- # For example usage see evalDemo.m and http://mscoco.org/.
- #
- # The evaluation parameters are as follows (defaults in brackets):
- # imgIds - [all] N img ids to use for evaluation
- # catIds - [all] K cat ids to use for evaluation
- # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
- # recThrs - [0:.01:1] R=101 recall thresholds for evaluation
- # areaRng - [...] A=4 object area ranges for evaluation
- # maxDets - [1 10 100] M=3 thresholds on max detections per image
- # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
- # iouType replaced the now DEPRECATED useSegm parameter.
- # useCats - [1] if true use category labels for evaluation
- # Note: if useCats=0 category labels are ignored as in proposal scoring.
- # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
- #
- # evaluate(): evaluates detections on every image and every category and
- # concats the results into the "evalImgs" with fields:
- # dtIds - [1xD] id for each of the D detections (dt)
- # gtIds - [1xG] id for each of the G ground truths (gt)
- # dtMatches - [TxD] matching gt id at each IoU or 0
- # gtMatches - [TxG] matching dt id at each IoU or 0
- # dtScores - [1xD] confidence of each dt
- # gtIgnore - [1xG] ignore flag for each gt
- # dtIgnore - [TxD] ignore flag for each dt at each IoU
- #
- # accumulate(): accumulates the per-image, per-category evaluation
- # results in "evalImgs" into the dictionary "eval" with fields:
- # params - parameters used for evaluation
- # date - date evaluation was performed
- # counts - [T,R,K,A,M] parameter dimensions (see above)
- # precision - [TxRxKxAxM] precision for every evaluation setting
- # recall - [TxKxAxM] max recall for every evaluation setting
- # Note: precision and recall==-1 for settings with no gt objects.
- #
- # See also coco, mask, pycocoDemo, pycocoEvalDemo
- #
- # Microsoft COCO Toolbox. version 2.0
- # Data, paper, and tutorials available at: http://mscoco.org/
- # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
- # Licensed under the Simplified BSD License [see coco/license.txt]
- def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
- '''
- Initialize CocoEval using coco APIs for gt and dt
- :param cocoGt: coco object with ground truth annotations
- :param cocoDt: coco object with detection results
- :return: None
- '''
- if not iouType:
- print('iouType not specified. use default iouType segm')
- self.cocoGt = cocoGt # ground truth COCO API
- self.cocoDt = cocoDt # detections COCO API
- self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
- self.eval = {} # accumulated evaluation results
- self._gts = defaultdict(list) # gt for evaluation
- self._dts = defaultdict(list) # dt for evaluation
- self.params = Params(iouType=iouType) # parameters
- self._paramsEval = {} # parameters for evaluation
- self.stats = [] # result summarization
- self.ious = {} # ious between all gts and dts
- if not cocoGt is None:
- self.params.imgIds = sorted(cocoGt.getImgIds())
- self.params.catIds = sorted(cocoGt.getCatIds())
-
-
- def _prepare(self):
- '''
- Prepare ._gts and ._dts for evaluation based on params
- :return: None
- '''
- def _toMask(anns, coco):
- # modify ann['segmentation'] by reference
- for ann in anns:
- rle = coco.annToRLE(ann)
- ann['segmentation'] = rle
- p = self.params
- if p.useCats:
- gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
- dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
- else:
- gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
- dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
-
- # convert ground truth to mask if iouType == 'segm'
- if p.iouType == 'segm':
- _toMask(gts, self.cocoGt)
- _toMask(dts, self.cocoDt)
- # set ignore flag
- for gt in gts:
- gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
- gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
- if p.iouType == 'keypoints':
- gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
- self._gts = defaultdict(list) # gt for evaluation
- self._dts = defaultdict(list) # dt for evaluation
- for gt in gts:
- self._gts[gt['image_id'], gt['category_id']].append(gt)
- for dt in dts:
- self._dts[dt['image_id'], dt['category_id']].append(dt)
- self.evalImgs = defaultdict(list) # per-image per-category evaluation results
- self.eval = {} # accumulated evaluation results
-
- def evaluate(self):
- '''
- Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
- :return: None
- '''
- tic = time.time()
- print('Running per image evaluation...')
- p = self.params
- # add backward compatibility if useSegm is specified in params
- if not p.useSegm is None:
- p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
- print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
- print('Evaluate annotation type *{}*'.format(p.iouType))
- p.imgIds = list(np.unique(p.imgIds))
- if p.useCats:
- p.catIds = list(np.unique(p.catIds))
- p.maxDets = sorted(p.maxDets)
- self.params=p
-
- self._prepare()
- # loop through images, area range, max detection number
- catIds = p.catIds if p.useCats else [-1]
-
- if p.iouType == 'segm' or p.iouType == 'bbox':
- computeIoU = self.computeIoU
- elif p.iouType == 'keypoints':
- computeIoU = self.computeOks
- self.ious = {(imgId, catId): computeIoU(imgId, catId) \
- for imgId in p.imgIds
- for catId in catIds}
-
- evaluateImg = self.evaluateImg
- maxDet = p.maxDets[-1]
- self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
- for catId in catIds
- for areaRng in p.areaRng
- for imgId in p.imgIds
- ]
- self._paramsEval = copy.deepcopy(self.params)
- toc = time.time()
- print('DONE (t={:0.2f}s).'.format(toc-tic))
-
- def computeIoU(self, imgId, catId):
- p = self.params
- if p.useCats:
- gt = self._gts[imgId,catId]
- dt = self._dts[imgId,catId]
- else:
- gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
- dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
- if len(gt) == 0 and len(dt) ==0:
- return []
- inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
- dt = [dt[i] for i in inds]
- if len(dt) > p.maxDets[-1]:
- dt=dt[0:p.maxDets[-1]]
-
- if p.iouType == 'segm':
- g = [g['segmentation'] for g in gt]
- d = [d['segmentation'] for d in dt]
- elif p.iouType == 'bbox':
- g = [g['bbox'] for g in gt]
- d = [d['bbox'] for d in dt]
- else:
- raise Exception('unknown iouType for iou computation')
-
- # compute iou between each dt and gt region
- iscrowd = [int(o['iscrowd']) for o in gt]
- ious = maskUtils.iou(d,g,iscrowd)
- return ious
-
- def computeOks(self, imgId, catId):
- p = self.params
- # dimension here should be Nxm
- gts = self._gts[imgId, catId]
- dts = self._dts[imgId, catId]
- inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
- dts = [dts[i] for i in inds]
- if len(dts) > p.maxDets[-1]:
- dts = dts[0:p.maxDets[-1]]
- # if len(gts) == 0 and len(dts) == 0:
- if len(gts) == 0 or len(dts) == 0:
- return []
- ious = np.zeros((len(dts), len(gts)))
- sigmas = p.kpt_oks_sigmas
- vars = (sigmas * 2)**2
- k = len(sigmas)
- # compute oks between each detection and ground truth object
- for j, gt in enumerate(gts):
- # create bounds for ignore regions (double the gt bbox)
- g = np.array(gt['keypoints'])
- xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
- k1 = np.count_nonzero(vg > 0)
- bb = gt['bbox']
- x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
- y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
- for i, dt in enumerate(dts):
- d = np.array(dt['keypoints'])
- xd = d[0::3]; yd = d[1::3]
- if k1>0:
- # measure the per-keypoint distance if keypoints visible
- dx = xd - xg
- dy = yd - yg
- else:
- # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
- z = np.zeros((k))
- dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
- dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
- e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
- if k1 > 0:
- e=e[vg > 0]
- ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
- return ious
-
- def evaluateImg(self, imgId, catId, aRng, maxDet):
- '''
- perform evaluation for single category and image
- :return: dict (single image results)
- '''
- p = self.params
- if p.useCats:
- gt = self._gts[imgId,catId]
- dt = self._dts[imgId,catId]
- else:
- gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
- dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
- if len(gt) == 0 and len(dt) ==0:
- return None
-
- for g in gt:
- if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
- g['_ignore'] = 1
- else:
- g['_ignore'] = 0
-
- # sort dt highest score first, sort gt ignore last
- gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
- gt = [gt[i] for i in gtind]
- dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
- dt = [dt[i] for i in dtind[0:maxDet]]
- iscrowd = [int(o['iscrowd']) for o in gt]
- # load computed ious
- ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
-
- T = len(p.iouThrs)
- G = len(gt)
- D = len(dt)
- gtm = np.zeros((T,G))
- dtm = np.zeros((T,D))
- gtIg = np.array([g['_ignore'] for g in gt])
- dtIg = np.zeros((T,D))
- if not len(ious)==0:
- for tind, t in enumerate(p.iouThrs):
- for dind, d in enumerate(dt):
- # information about best match so far (m=-1 -> unmatched)
- iou = min([t,1-1e-10])
- m = -1
- for gind, g in enumerate(gt):
- # if this gt already matched, and not a crowd, continue
- if gtm[tind,gind]>0 and not iscrowd[gind]:
- continue
- # if dt matched to reg gt, and on ignore gt, stop
- if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
- break
- # continue to next gt unless better match made
- if ious[dind,gind] < iou:
- continue
- # if match successful and best so far, store appropriately
- iou=ious[dind,gind]
- m=gind
- # if match made store id of match for both dt and gt
- if m ==-1:
- continue
- dtIg[tind,dind] = gtIg[m]
- dtm[tind,dind] = gt[m]['id']
- gtm[tind,m] = d['id']
- # set unmatched detections outside of area range to ignore
- a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
- dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
- # store results for given image and category
- return {
- 'image_id': imgId,
- 'category_id': catId,
- 'aRng': aRng,
- 'maxDet': maxDet,
- 'dtIds': [d['id'] for d in dt],
- 'gtIds': [g['id'] for g in gt],
- 'dtMatches': dtm,
- 'gtMatches': gtm,
- 'dtScores': [d['score'] for d in dt],
- 'gtIgnore': gtIg,
- 'dtIgnore': dtIg,
- }
-
- def accumulate(self, p = None):
- '''
- Accumulate per image evaluation results and store the result in self.eval
- :param p: input params for evaluation
- :return: None
- '''
- print('Accumulating evaluation results...')
- tic = time.time()
- if not self.evalImgs:
- print('Please run evaluate() first')
- # allows input customized parameters
- if p is None:
- p = self.params
- p.catIds = p.catIds if p.useCats == 1 else [-1]
- T = len(p.iouThrs)
- R = len(p.recThrs)
- K = len(p.catIds) if p.useCats else 1
- A = len(p.areaRng)
- M = len(p.maxDets)
- precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
- recall = -np.ones((T,K,A,M))
- scores = -np.ones((T,R,K,A,M))
-
- # create dictionary for future indexing
- _pe = self._paramsEval
- catIds = _pe.catIds if _pe.useCats else [-1]
- setK = set(catIds)
- setA = set(map(tuple, _pe.areaRng))
- setM = set(_pe.maxDets)
- setI = set(_pe.imgIds)
- # get inds to evaluate
- k_list = [n for n, k in enumerate(p.catIds) if k in setK]
- m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
- a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
- i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
- I0 = len(_pe.imgIds)
- A0 = len(_pe.areaRng)
- # retrieve E at each category, area range, and max number of detections
- for k, k0 in enumerate(k_list):
- Nk = k0*A0*I0
- for a, a0 in enumerate(a_list):
- Na = a0*I0
- for m, maxDet in enumerate(m_list):
- E = [self.evalImgs[Nk + Na + i] for i in i_list]
- E = [e for e in E if not e is None]
- if len(E) == 0:
- continue
- dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
-
- # different sorting methods generate slightly different results;
- # mergesort is used to be consistent with the Matlab implementation.
- inds = np.argsort(-dtScores, kind='mergesort')
- dtScoresSorted = dtScores[inds]
-
- dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
- dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
- gtIg = np.concatenate([e['gtIgnore'] for e in E])
- npig = np.count_nonzero(gtIg==0 )
- if npig == 0:
- continue
- tps = np.logical_and( dtm, np.logical_not(dtIg) )
- fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
-
- tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
- fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
- for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
- tp = np.array(tp)
- fp = np.array(fp)
- nd = len(tp)
- rc = tp / npig
- pr = tp / (fp+tp+np.spacing(1))
- q = np.zeros((R,))
- ss = np.zeros((R,))
-
- if nd:
- recall[t,k,a,m] = rc[-1]
- else:
- recall[t,k,a,m] = 0
-
- # numpy is slow at element access without cython optimization;
- # converting to python lists gives a significant speed improvement
- pr = pr.tolist(); q = q.tolist()
-
- for i in range(nd-1, 0, -1):
- if pr[i] > pr[i-1]:
- pr[i-1] = pr[i]
-
- inds = np.searchsorted(rc, p.recThrs, side='left')
- try:
- for ri, pi in enumerate(inds):
- q[ri] = pr[pi]
- ss[ri] = dtScoresSorted[pi]
- except:
- pass
- precision[t,:,k,a,m] = np.array(q)
- scores[t,:,k,a,m] = np.array(ss)
- self.eval = {
- 'params': p,
- 'counts': [T, R, K, A, M],
- 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
- 'precision': precision,
- 'recall': recall,
- 'scores': scores,
- }
- toc = time.time()
- print('DONE (t={:0.2f}s).'.format( toc-tic))
-
- def summarize(self):
- '''
- Compute and display summary metrics for evaluation results.
- Note this function can *only* be applied to the default parameter setting
- '''
- def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
- p = self.params
- iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
- titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
- typeStr = '(AP)' if ap==1 else '(AR)'
- iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
- if iouThr is None else '{:0.2f}'.format(iouThr)
-
- aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
- mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
- if ap == 1:
- # dimension of precision: [TxRxKxAxM]
- s = self.eval['precision']
- # IoU
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:,:,:,aind,mind]
- else:
- # dimension of recall: [TxKxAxM]
- s = self.eval['recall']
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:,:,aind,mind]
- if len(s[s>-1])==0:
- mean_s = -1
- else:
- mean_s = np.mean(s[s>-1])
- print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
- return mean_s
- def _summarizeDets():
- stats = np.zeros((12,))
- stats[0] = _summarize(1)
- stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
- stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
- stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
- stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
- stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
- stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
- stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
- stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
- stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
- stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
- stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
- return stats
- def _summarizeKps():
- stats = np.zeros((10,))
- stats[0] = _summarize(1, maxDets=20)
- stats[1] = _summarize(1, maxDets=20, iouThr=.5)
- stats[2] = _summarize(1, maxDets=20, iouThr=.75)
- stats[3] = _summarize(1, maxDets=20, areaRng='medium')
- stats[4] = _summarize(1, maxDets=20, areaRng='large')
- stats[5] = _summarize(0, maxDets=20)
- stats[6] = _summarize(0, maxDets=20, iouThr=.5)
- stats[7] = _summarize(0, maxDets=20, iouThr=.75)
- stats[8] = _summarize(0, maxDets=20, areaRng='medium')
- stats[9] = _summarize(0, maxDets=20, areaRng='large')
- return stats
- if not self.eval:
- raise Exception('Please run accumulate() first')
- iouType = self.params.iouType
- if iouType == 'segm' or iouType == 'bbox':
- summarize = _summarizeDets
- elif iouType == 'keypoints':
- summarize = _summarizeKps
- self.stats = summarize()
-
- def __str__(self):
- self.summarize()
-
-class Params:
- '''
- Params for coco evaluation api
- '''
- def setDetParams(self):
- self.imgIds = []
- self.catIds = []
- # np.arange causes trouble. the data point on arange is slightly larger than the true value
- self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
- self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
- self.maxDets = [1, 10, 100]
- self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
- self.areaRngLbl = ['all', 'small', 'medium', 'large']
- self.useCats = 1
-
- def setKpParams(self):
- self.imgIds = []
- self.catIds = []
- # np.arange causes trouble. the data point on arange is slightly larger than the true value
- self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
- self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
- self.maxDets = [20]
- self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
- self.areaRngLbl = ['all', 'medium', 'large']
- self.useCats = 1
- self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
-
- def __init__(self, iouType='segm'):
- if iouType == 'segm' or iouType == 'bbox':
- self.setDetParams()
- elif iouType == 'keypoints':
- self.setKpParams()
- else:
- raise Exception('iouType not supported')
- self.iouType = iouType
- # useSegm is deprecated
- self.useSegm = None
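
For reference, the evaluate/accumulate/summarize flow described in the class comment above is usually driven as in the sketch below. File names are placeholders, and whether you import from upstream `pycocotools` or from a vendored copy such as the one deleted here depends on the project layout:

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

cocoGt = COCO('instances_val2017.json')        # ground-truth annotations (placeholder path)
cocoDt = cocoGt.loadRes('detections.json')     # detection results (placeholder path)

E = COCOeval(cocoGt, cocoDt, iouType='bbox')
E.params.imgIds = sorted(cocoGt.getImgIds())   # optionally restrict the evaluated images
E.evaluate()      # per-image, per-category matching
E.accumulate()    # build the precision/recall arrays
E.summarize()     # print the twelve standard AP/AR numbers
```
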
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
deleted file mode 100644
index be777123a886503172a95fe0719e956a147bbd68..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='EncHead',
- in_channels=[512, 1024, 2048],
- in_index=(1, 2, 3),
- channels=512,
- num_codes=32,
- use_se_loss=True,
- add_lateral=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_se_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
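ndern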
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py
deleted file mode 100644
index 6927ea7a83ac9309e5f883ee974a5dcfa8a2aa3b..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import print_function
-
-import roslib
-#roslib.load_manifest('my_package')
-import sys
-import rospy
-import cv2
-import numpy as np
-from std_msgs.msg import String
-from sensor_msgs.msg import Image
-from cv_bridge import CvBridge, CvBridgeError
-
-class video_show:
-
- def __init__(self):
- self.show_output = rospy.get_param('~show_output', True)
- self.save_output = rospy.get_param('~save_output', False)
- self.output_video_file = rospy.get_param('~output_video_file','result.mp4')
- # rospy.loginfo(f"Listener - params: show_output={self.show_output}, save_output={self.save_output}, output_video_file={self.output_video_file}")
-
- self.bridge = CvBridge()
- self.image_sub = rospy.Subscriber("midas_topic", Image, self.callback)
-
- def callback(self, data):
- try:
- cv_image = self.bridge.imgmsg_to_cv2(data)
- except CvBridgeError as e:
- print(e)
- return
-
- if cv_image.size == 0:
- return
-
- rospy.loginfo("Listener: Received new frame")
- cv_image = cv_image.astype("uint8")
-
- if self.show_output==True:
- cv2.imshow("video_show", cv_image)
- cv2.waitKey(10)
-
- if self.save_output==True:
- if not hasattr(self, 'out'):  # create the video writer once, on the first saved frame
- fourcc = cv2.VideoWriter_fourcc(*'XVID')
- self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25, (cv_image.shape[1], cv_image.shape[0]))
-
- self.out.write(cv_image)
-
-
-
-def main(args):
- rospy.init_node('listener', anonymous=True)
- ic = video_show()
- try:
- rospy.spin()
- except KeyboardInterrupt:
- print("Shutting down")
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
- main(sys.argv)
\ No newline at end of file
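
The node above only consumes `midas_topic`; for quick local testing it can be fed by a minimal publisher. This is an illustrative sketch (the node name, frame size and rate are arbitrary), assuming the same `rospy`/`cv_bridge` stack is available:

```python
#!/usr/bin/env python3
import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

rospy.init_node('midas_test_publisher', anonymous=True)
pub = rospy.Publisher('midas_topic', Image, queue_size=1)
bridge = CvBridge()
rate = rospy.Rate(5)  # 5 Hz of dummy frames

while not rospy.is_shutdown():
    frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # fake BGR image
    pub.publish(bridge.cv2_to_imgmsg(frame, encoding='bgr8'))
    rate.sleep()
```
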
diff --git a/spaces/corpvs/test/style.css b/spaces/corpvs/test/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/corpvs/test/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/wandb_utils.py b/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/wandb_utils.py
deleted file mode 100644
index 4a676551fdeffbadb75b8d651215eb17773f954a..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/SOURCE/yolo_files/utils/wandb_logging/wandb_utils.py
+++ /dev/null
@@ -1,302 +0,0 @@
-import json
-import sys
-from pathlib import Path
-
-import torch
-import yaml
-from tqdm import tqdm
-
-sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path
-from utils.datasets import LoadImagesAndLabels
-from utils.datasets import img2label_paths
-from utils.general import colorstr, xywh2xyxy, check_dataset, check_file
-
-try:
- import wandb
- from wandb import init, finish
-except ImportError:
- wandb = None
-
-WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
-
-
-def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
- return from_string[len(prefix):]
-
-
-def check_wandb_config_file(data_config_file):
- wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
- if Path(wandb_config).is_file():
- return wandb_config
- return data_config_file
-
-
-def get_run_info(run_path):
- run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
- run_id = run_path.stem
- project = run_path.parent.stem
- model_artifact_name = 'run_' + run_id + '_model'
- return run_id, project, model_artifact_name
-
-
-def check_wandb_resume(opt):
- process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None
- if isinstance(opt.resume, str):
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
- if opt.global_rank not in [-1, 0]: # For resuming DDP runs
- run_id, project, model_artifact_name = get_run_info(opt.resume)
- api = wandb.Api()
- artifact = api.artifact(project + '/' + model_artifact_name + ':latest')
- modeldir = artifact.download()
- opt.weights = str(Path(modeldir) / "last.pt")
- return True
- return None
-
-
-def process_wandb_config_ddp_mode(opt):
- with open(check_file(opt.data)) as f:
- data_dict = yaml.safe_load(f) # data dict
- train_dir, val_dir = None, None
- if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
- api = wandb.Api()
- train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
- train_dir = train_artifact.download()
- train_path = Path(train_dir) / 'data/images/'
- data_dict['train'] = str(train_path)
-
- if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
- api = wandb.Api()
- val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
- val_dir = val_artifact.download()
- val_path = Path(val_dir) / 'data/images/'
- data_dict['val'] = str(val_path)
- if train_dir or val_dir:
- ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
- with open(ddp_data_path, 'w') as f:
- yaml.safe_dump(data_dict, f)
- opt.data = ddp_data_path
-
-
-class WandbLogger():
- def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
- # Pre-training routine --
- self.job_type = job_type
- self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict
- # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
- if isinstance(opt.resume, str): # checks resume from artifact
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
- run_id, project, model_artifact_name = get_run_info(opt.resume)
- model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
- assert wandb, 'install wandb to resume wandb runs'
- # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
- self.wandb_run = wandb.init(id=run_id, project=project, resume='allow')
- opt.resume = model_artifact_name
- elif self.wandb:
- self.wandb_run = wandb.init(config=opt,
- resume="allow",
- project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
- name=name,
- job_type=job_type,
- id=run_id) if not wandb.run else wandb.run
- if self.wandb_run:
- if self.job_type == 'Training':
- if not opt.resume:
- wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
- # Info useful for resuming from artifacts
- self.wandb_run.config.opt = vars(opt)
- self.wandb_run.config.data_dict = wandb_data_dict
- self.data_dict = self.setup_training(opt, data_dict)
- if self.job_type == 'Dataset Creation':
- self.data_dict = self.check_and_upload_dataset(opt)
- else:
- prefix = colorstr('wandb: ')
- print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
-
- def check_and_upload_dataset(self, opt):
- assert wandb, 'Install wandb to upload dataset'
- check_dataset(self.data_dict)
- config_path = self.log_dataset_artifact(check_file(opt.data),
- opt.single_cls,
- 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
- print("Created dataset config file ", config_path)
- with open(config_path) as f:
- wandb_data_dict = yaml.safe_load(f)
- return wandb_data_dict
-
- def setup_training(self, opt, data_dict):
- self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants
- self.bbox_interval = opt.bbox_interval
- if isinstance(opt.resume, str):
- modeldir, _ = self.download_model_artifact(opt)
- if modeldir:
- self.weights = Path(modeldir) / "last.pt"
- config = self.wandb_run.config
- opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
- self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
- config.opt['hyp']
- data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume
- if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download
- self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
- opt.artifact_alias)
- self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
- opt.artifact_alias)
- self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None
- if self.train_artifact_path is not None:
- train_path = Path(self.train_artifact_path) / 'data/images/'
- data_dict['train'] = str(train_path)
- if self.val_artifact_path is not None:
- val_path = Path(self.val_artifact_path) / 'data/images/'
- data_dict['val'] = str(val_path)
- self.val_table = self.val_artifact.get("val")
- self.map_val_table_path()
- if self.val_artifact is not None:
- self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
- self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
- if opt.bbox_interval == -1:
- self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
- return data_dict
-
- def download_dataset_artifact(self, path, alias):
- if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
- dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
- assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
- datadir = dataset_artifact.download()
- return datadir, dataset_artifact
- return None, None
-
- def download_model_artifact(self, opt):
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
- model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
- assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
- modeldir = model_artifact.download()
- epochs_trained = model_artifact.metadata.get('epochs_trained')
- total_epochs = model_artifact.metadata.get('total_epochs')
- assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % (
- total_epochs)
- return modeldir, model_artifact
- return None, None
-
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
- model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
- 'original_url': str(path),
- 'epochs_trained': epoch + 1,
- 'save period': opt.save_period,
- 'project': opt.project,
- 'total_epochs': opt.epochs,
- 'fitness_score': fitness_score
- })
- model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
- wandb.log_artifact(model_artifact,
- aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
- print("Saving model artifact on epoch ", epoch + 1)
-
- def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
- with open(data_file) as f:
- data = yaml.safe_load(f) # data dict
- nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
- names = {k: v for k, v in enumerate(names)} # to index dictionary
- self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
- data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
- self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
- data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
- if data.get('train'):
- data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
- if data.get('val'):
- data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
- path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
- data.pop('download', None)
- with open(path, 'w') as f:
- yaml.safe_dump(data, f)
-
- if self.job_type == 'Training': # builds correct artifact pipeline graph
- self.wandb_run.use_artifact(self.val_artifact)
- self.wandb_run.use_artifact(self.train_artifact)
- self.val_artifact.wait()
- self.val_table = self.val_artifact.get('val')
- self.map_val_table_path()
- else:
- self.wandb_run.log_artifact(self.train_artifact)
- self.wandb_run.log_artifact(self.val_artifact)
- return path
-
- def map_val_table_path(self):
- self.val_table_map = {}
- print("Mapping dataset")
- for i, data in enumerate(tqdm(self.val_table.data)):
- self.val_table_map[data[3]] = data[0]
-
- def create_dataset_table(self, dataset, class_to_id, name='dataset'):
- # TODO: explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging
- artifact = wandb.Artifact(name=name, type="dataset")
- img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
- img_files = tqdm(dataset.img_files) if not img_files else img_files
- for img_file in img_files:
- if Path(img_file).is_dir():
- artifact.add_dir(img_file, name='data/images')
- labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
- artifact.add_dir(labels_path, name='data/labels')
- else:
- artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
- label_file = Path(img2label_paths([img_file])[0])
- artifact.add_file(str(label_file),
- name='data/labels/' + label_file.name) if label_file.exists() else None
- table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
- class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
- for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
- box_data, img_classes = [], {}
- for cls, *xywh in labels[:, 1:].tolist():
- cls = int(cls)
- box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
- "class_id": cls,
- "box_caption": "%s" % (class_to_id[cls])})
- img_classes[cls] = class_to_id[cls]
- boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
- table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
- Path(paths).name)
- artifact.add(table, name)
- return artifact
-
- def log_training_progress(self, predn, path, names):
- if self.val_table and self.result_table:
- class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
- box_data = []
- total_conf = 0
- for *xyxy, conf, cls in predn.tolist():
- if conf >= 0.25:
- box_data.append(
- {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
- "class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
- "scores": {"class_score": conf},
- "domain": "pixel"})
- total_conf = total_conf + conf
- boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
- id = self.val_table_map[Path(path).name]
- self.result_table.add_data(self.current_epoch,
- id,
- wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
- total_conf / max(1, len(box_data))
- )
-
- def log(self, log_dict):
- if self.wandb_run:
- for key, value in log_dict.items():
- self.log_dict[key] = value
-
- def end_epoch(self, best_result=False):
- if self.wandb_run:
- wandb.log(self.log_dict)
- self.log_dict = {}
- if self.result_artifact:
- train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
- self.result_artifact.add(train_results, 'result')
- wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch),
- ('best' if best_result else '')])
- self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
- self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
-
- def finish_run(self):
- if self.wandb_run:
- if self.log_dict:
- wandb.log(self.log_dict)
- wandb.run.finish()
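
A hedged sketch of how a training loop typically drives this logger (`log` buffers metrics, `end_epoch` flushes them, `finish_run` closes the run). The `opt` namespace below is a made-up minimum; the real YOLOv5 `opt` carries many more fields, and the call assumes `wandb` is installed and you are logged in (or running with `WANDB_MODE=offline`):

```python
from argparse import Namespace

opt = Namespace(resume=False, project='runs/train', upload_dataset=False,
                bbox_interval=-1, epochs=3, artifact_alias='latest')  # hypothetical minimum
data_dict = {'train': 'data/images/train', 'val': 'data/images/val'}

logger = WandbLogger(opt, name='demo-run', run_id=None, data_dict=data_dict)
for epoch in range(opt.epochs):
    logger.log({'train/loss': 1.0 / (epoch + 1)})  # buffered until end_epoch()
    logger.current_epoch = epoch
    logger.end_epoch(best_result=False)
logger.finish_run()
```
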
diff --git a/spaces/crashedice/signify/setup.py b/spaces/crashedice/signify/setup.py
deleted file mode 100644
index e3281ae9bd7b98568e77014dba1b7b353d409205..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/setup.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from pkg_resources import parse_version
-from configparser import ConfigParser
-import setuptools, shlex
-assert parse_version(setuptools.__version__)>=parse_version('36.2')
-
-# note: all settings are in settings.ini; edit there, not here
-config = ConfigParser(delimiters=['='])
-config.read('settings.ini', encoding='utf-8')
-cfg = config['DEFAULT']
-
-cfg_keys = 'version description keywords author author_email'.split()
-expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
-for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
-setup_cfg = {o:cfg[o] for o in cfg_keys}
-
-licenses = {
- 'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
- 'mit': ('MIT License', 'OSI Approved :: MIT License'),
- 'gpl2': ('GNU General Public License v2', 'OSI Approved :: GNU General Public License v2 (GPLv2)'),
- 'gpl3': ('GNU General Public License v3', 'OSI Approved :: GNU General Public License v3 (GPLv3)'),
- 'bsd3': ('BSD License', 'OSI Approved :: BSD License'),
-}
-statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
- '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
-py_versions = '3.6 3.7 3.8 3.9 3.10'.split()
-
-requirements = shlex.split(cfg.get('requirements', ''))
-if cfg.get('pip_requirements'): requirements += shlex.split(cfg.get('pip_requirements', ''))
-min_python = cfg['min_python']
-lic = licenses.get(cfg['license'].lower(), (cfg['license'], None))
-dev_requirements = (cfg.get('dev_requirements') or '').split()
-
-setuptools.setup(
- name = cfg['lib_name'],
- license = lic[0],
- classifiers = [
- 'Development Status :: ' + statuses[int(cfg['status'])],
- 'Intended Audience :: ' + cfg['audience'].title(),
- 'Natural Language :: ' + cfg['language'].title(),
- ] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]] + (['License :: ' + lic[1] ] if lic[1] else []),
- url = cfg['git_url'],
- packages = setuptools.find_packages(),
- include_package_data = True,
- install_requires = requirements,
- extras_require={ 'dev': dev_requirements },
- dependency_links = cfg.get('dep_links','').split(),
- python_requires = '>=' + cfg['min_python'],
- long_description = open('README.md', encoding='utf-8').read(),
- long_description_content_type = 'text/markdown',
- zip_safe = False,
- entry_points = {
- 'console_scripts': cfg.get('console_scripts','').split(),
- 'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
- },
- **setup_cfg)
-
-
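
The setup script above takes every value from `settings.ini` and asserts that a specific set of keys is present. A hypothetical minimal `settings.ini` satisfying those assertions might look like the following; all values are placeholders, `status` indexes the `statuses` list, and `license` must be one of the keys in `licenses`:

```ini
[DEFAULT]
lib_name = signify
lib_path = signify
user = example-user
branch = main
version = 0.0.1
description = Signature verification demo package
keywords = signature verification
author = Example Author
author_email = author@example.com
license = apache2
status = 3
min_python = 3.7
audience = Developers
language = English
git_url = https://github.com/example-user/signify
requirements = torch opencv-python
```
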
diff --git a/spaces/crashedice/signify/signify/gan/data/__init__.py b/spaces/crashedice/signify/signify/gan/data/__init__.py
deleted file mode 100644
index 04a5ed0f602e69614778f83e1c384d489330c046..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/signify/gan/data/__init__.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""This package includes all the modules related to data loading and preprocessing
-
- To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
- You need to implement four functions:
- -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
- -- <__len__>: return the size of dataset.
- -- <__getitem__>: get a data point from data loader.
- -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
-
-Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
-See our template dataset class 'template_dataset.py' for more details.
-"""
-import importlib
-import torch.utils.data
-from signify.gan.data.base_dataset import BaseDataset
-
-
-def find_dataset_using_name(dataset_name):
- """Import the module "data/[dataset_name]_dataset.py".
-
- In the file, the class called DatasetNameDataset() will
- be instantiated. It has to be a subclass of BaseDataset,
- and it is case-insensitive.
- """
- dataset_filename = "signify.gan.data." + dataset_name + "_dataset"
- datasetlib = importlib.import_module(dataset_filename)
-
- dataset = None
- target_dataset_name = dataset_name.replace('_', '') + 'dataset'
- for name, cls in datasetlib.__dict__.items():
- if name.lower() == target_dataset_name.lower() \
- and issubclass(cls, BaseDataset):
- dataset = cls
-
- if dataset is None:
- raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
-
- return dataset
-
-
-def get_option_setter(dataset_name):
- """Return the static method of the dataset class."""
- dataset_class = find_dataset_using_name(dataset_name)
- return dataset_class.modify_commandline_options
-
-
-def create_dataset(opt):
- """Create a dataset given the option.
-
- This function wraps the class CustomDatasetDataLoader.
- This is the main interface between this package and 'train.py'/'test.py'
-
- Example:
- >>> from data import create_dataset
- >>> dataset = create_dataset(opt)
- """
- data_loader = CustomDatasetDataLoader(opt)
- dataset = data_loader.load_data()
- return dataset
-
-
-class CustomDatasetDataLoader():
- """Wrapper class of Dataset class that performs multi-threaded data loading"""
-
- def __init__(self, opt):
- """Initialize this class
-
- Step 1: create a dataset instance given the name [dataset_mode]
- Step 2: create a multi-threaded data loader.
- """
- self.opt = opt
- dataset_class = find_dataset_using_name(opt.dataset_mode)
- self.dataset = dataset_class(opt)
- print("dataset [%s] was created" % type(self.dataset).__name__)
- self.dataloader = torch.utils.data.DataLoader(
- self.dataset,
- batch_size=opt.batch_size,
- shuffle=not opt.serial_batches,
- num_workers=int(opt.num_threads))
-
- def load_data(self):
- return self
-
- def __len__(self):
- """Return the number of data in the dataset"""
- return min(len(self.dataset), self.opt.max_dataset_size)
-
- def __iter__(self):
- """Return a batch of data"""
- for i, data in enumerate(self.dataloader):
- if i * self.opt.batch_size >= self.opt.max_dataset_size:
- break
- yield data
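
The package docstring above spells out the contract for a custom dataset. A hedged skeleton of what a `dummy_dataset.py` could look like; the returned dict keys and tensor shapes are placeholders, and the `BaseDataset` signature is assumed to match the upstream CycleGAN/pix2pix template this package follows:

```python
# signify/gan/data/dummy_dataset.py (illustrative only)
import torch
from signify.gan.data.base_dataset import BaseDataset


class DummyDataset(BaseDataset):
    """Toy dataset that yields random image tensors; selected with --dataset_mode dummy."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--dummy_size', type=int, default=8, help='number of fake samples')
        return parser

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        self.size = getattr(opt, 'dummy_size', 8)

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        return {'A': torch.rand(3, 256, 256), 'A_paths': f'dummy_{index:04d}'}
```
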
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py
deleted file mode 100644
index f4e89a03e0263bc6c1d318b379fdcfe7f61f8588..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# FLI/FLC file handling.
-#
-# History:
-# 95-09-01 fl Created
-# 97-01-03 fl Fixed parser, setup decoder tile
-# 98-07-15 fl Renamed offset attribute to avoid name clash
-#
-# Copyright (c) Secret Labs AB 1997-98.
-# Copyright (c) Fredrik Lundh 1995-97.
-#
-# See the README file for information on usage and redistribution.
-#
-
-import os
-
-from . import Image, ImageFile, ImagePalette
-from ._binary import i16le as i16
-from ._binary import i32le as i32
-from ._binary import o8
-
-#
-# decoder
-
-
-def _accept(prefix):
- return (
- len(prefix) >= 6
- and i16(prefix, 4) in [0xAF11, 0xAF12]
- and i16(prefix, 14) in [0, 3] # flags
- )
-
-
-##
-# Image plugin for the FLI/FLC animation format. Use the seek
-# method to load individual frames.
-
-
-class FliImageFile(ImageFile.ImageFile):
- format = "FLI"
- format_description = "Autodesk FLI/FLC Animation"
- _close_exclusive_fp_after_loading = False
-
- def _open(self):
- # HEAD
- s = self.fp.read(128)
- if not (_accept(s) and s[20:22] == b"\x00\x00"):
- msg = "not an FLI/FLC file"
- raise SyntaxError(msg)
-
- # frames
- self.n_frames = i16(s, 6)
- self.is_animated = self.n_frames > 1
-
- # image characteristics
- self.mode = "P"
- self._size = i16(s, 8), i16(s, 10)
-
- # animation speed
- duration = i32(s, 16)
- magic = i16(s, 4)
- if magic == 0xAF11:
- duration = (duration * 1000) // 70
- self.info["duration"] = duration
-
- # look for palette
- palette = [(a, a, a) for a in range(256)]
-
- s = self.fp.read(16)
-
- self.__offset = 128
-
- if i16(s, 4) == 0xF100:
- # prefix chunk; ignore it
- self.__offset = self.__offset + i32(s)
- s = self.fp.read(16)
-
- if i16(s, 4) == 0xF1FA:
- # look for palette chunk
- number_of_subchunks = i16(s, 6)
- chunk_size = None
- for _ in range(number_of_subchunks):
- if chunk_size is not None:
- self.fp.seek(chunk_size - 6, os.SEEK_CUR)
- s = self.fp.read(6)
- chunk_type = i16(s, 4)
- if chunk_type in (4, 11):
- self._palette(palette, 2 if chunk_type == 11 else 0)
- break
- chunk_size = i32(s)
- if not chunk_size:
- break
-
- palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
- self.palette = ImagePalette.raw("RGB", b"".join(palette))
-
- # set things up to decode first frame
- self.__frame = -1
- self._fp = self.fp
- self.__rewind = self.fp.tell()
- self.seek(0)
-
- def _palette(self, palette, shift):
- # load palette
-
- i = 0
- for e in range(i16(self.fp.read(2))):
- s = self.fp.read(2)
- i = i + s[0]
- n = s[1]
- if n == 0:
- n = 256
- s = self.fp.read(n * 3)
- for n in range(0, len(s), 3):
- r = s[n] << shift
- g = s[n + 1] << shift
- b = s[n + 2] << shift
- palette[i] = (r, g, b)
- i += 1
-
- def seek(self, frame):
- if not self._seek_check(frame):
- return
- if frame < self.__frame:
- self._seek(0)
-
- for f in range(self.__frame + 1, frame + 1):
- self._seek(f)
-
- def _seek(self, frame):
- if frame == 0:
- self.__frame = -1
- self._fp.seek(self.__rewind)
- self.__offset = 128
- else:
- # ensure that the previous frame was loaded
- self.load()
-
- if frame != self.__frame + 1:
- msg = f"cannot seek to frame {frame}"
- raise ValueError(msg)
- self.__frame = frame
-
- # move to next frame
- self.fp = self._fp
- self.fp.seek(self.__offset)
-
- s = self.fp.read(4)
- if not s:
- raise EOFError
-
- framesize = i32(s)
-
- self.decodermaxblock = framesize
- self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]
-
- self.__offset += framesize
-
- def tell(self):
- return self.__frame
-
-
-#
-# registry
-
-Image.register_open(FliImageFile.format, FliImageFile, _accept)
-
-Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/testing.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/testing.py
deleted file mode 100644
index e0df0d2a657fe19523957b85964b9956e5c78a30..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/click/testing.py
+++ /dev/null
@@ -1,479 +0,0 @@
-import contextlib
-import io
-import os
-import shlex
-import shutil
-import sys
-import tempfile
-import typing as t
-from types import TracebackType
-
-from . import formatting
-from . import termui
-from . import utils
-from ._compat import _find_binary_reader
-
-if t.TYPE_CHECKING:
- from .core import BaseCommand
-
-
-class EchoingStdin:
- def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None:
- self._input = input
- self._output = output
- self._paused = False
-
- def __getattr__(self, x: str) -> t.Any:
- return getattr(self._input, x)
-
- def _echo(self, rv: bytes) -> bytes:
- if not self._paused:
- self._output.write(rv)
-
- return rv
-
- def read(self, n: int = -1) -> bytes:
- return self._echo(self._input.read(n))
-
- def read1(self, n: int = -1) -> bytes:
- return self._echo(self._input.read1(n)) # type: ignore
-
- def readline(self, n: int = -1) -> bytes:
- return self._echo(self._input.readline(n))
-
- def readlines(self) -> t.List[bytes]:
- return [self._echo(x) for x in self._input.readlines()]
-
- def __iter__(self) -> t.Iterator[bytes]:
- return iter(self._echo(x) for x in self._input)
-
- def __repr__(self) -> str:
- return repr(self._input)
-
-
-@contextlib.contextmanager
-def _pause_echo(stream: t.Optional[EchoingStdin]) -> t.Iterator[None]:
- if stream is None:
- yield
- else:
- stream._paused = True
- yield
- stream._paused = False
-
-
-class _NamedTextIOWrapper(io.TextIOWrapper):
- def __init__(
- self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any
- ) -> None:
- super().__init__(buffer, **kwargs)
- self._name = name
- self._mode = mode
-
- @property
- def name(self) -> str:
- return self._name
-
- @property
- def mode(self) -> str:
- return self._mode
-
-
-def make_input_stream(
- input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]], charset: str
-) -> t.BinaryIO:
- # Is already an input stream.
- if hasattr(input, "read"):
- rv = _find_binary_reader(t.cast(t.IO[t.Any], input))
-
- if rv is not None:
- return rv
-
- raise TypeError("Could not find binary reader for input stream.")
-
- if input is None:
- input = b""
- elif isinstance(input, str):
- input = input.encode(charset)
-
- return io.BytesIO(input)
-
-
-class Result:
- """Holds the captured result of an invoked CLI script."""
-
- def __init__(
- self,
- runner: "CliRunner",
- stdout_bytes: bytes,
- stderr_bytes: t.Optional[bytes],
- return_value: t.Any,
- exit_code: int,
- exception: t.Optional[BaseException],
- exc_info: t.Optional[
- t.Tuple[t.Type[BaseException], BaseException, TracebackType]
- ] = None,
- ):
- #: The runner that created the result
- self.runner = runner
- #: The standard output as bytes.
- self.stdout_bytes = stdout_bytes
- #: The standard error as bytes, or None if not available
- self.stderr_bytes = stderr_bytes
- #: The value returned from the invoked command.
- #:
- #: .. versionadded:: 8.0
- self.return_value = return_value
- #: The exit code as integer.
- self.exit_code = exit_code
- #: The exception that happened if one did.
- self.exception = exception
- #: The traceback
- self.exc_info = exc_info
-
- @property
- def output(self) -> str:
- """The (standard) output as unicode string."""
- return self.stdout
-
- @property
- def stdout(self) -> str:
- """The standard output as unicode string."""
- return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
- "\r\n", "\n"
- )
-
- @property
- def stderr(self) -> str:
- """The standard error as unicode string."""
- if self.stderr_bytes is None:
- raise ValueError("stderr not separately captured")
- return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
- "\r\n", "\n"
- )
-
- def __repr__(self) -> str:
- exc_str = repr(self.exception) if self.exception else "okay"
- return f"<{type(self).__name__} {exc_str}>"
-
-
-class CliRunner:
- """The CLI runner provides functionality to invoke a Click command line
- script for unittesting purposes in a isolated environment. This only
- works in single-threaded systems without any concurrency as it changes the
- global interpreter state.
-
- :param charset: the character set for the input and output data.
- :param env: a dictionary with environment variables for overriding.
- :param echo_stdin: if this is set to `True`, then reading from stdin writes
- to stdout. This is useful for showing examples in
- some circumstances. Note that regular prompts
- will automatically echo the input.
- :param mix_stderr: if this is set to `False`, then stdout and stderr are
- preserved as independent streams. This is useful for
- Unix-philosophy apps that have predictable stdout and
- noisy stderr, such that each may be measured
- independently
- """
-
- def __init__(
- self,
- charset: str = "utf-8",
- env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
- echo_stdin: bool = False,
- mix_stderr: bool = True,
- ) -> None:
- self.charset = charset
- self.env: t.Mapping[str, t.Optional[str]] = env or {}
- self.echo_stdin = echo_stdin
- self.mix_stderr = mix_stderr
-
- def get_default_prog_name(self, cli: "BaseCommand") -> str:
- """Given a command object it will return the default program name
- for it. The default is the `name` attribute or ``"root"`` if not
- set.
- """
- return cli.name or "root"
-
- def make_env(
- self, overrides: t.Optional[t.Mapping[str, t.Optional[str]]] = None
- ) -> t.Mapping[str, t.Optional[str]]:
- """Returns the environment overrides for invoking a script."""
- rv = dict(self.env)
- if overrides:
- rv.update(overrides)
- return rv
-
- @contextlib.contextmanager
- def isolation(
- self,
- input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None,
- env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
- color: bool = False,
- ) -> t.Iterator[t.Tuple[io.BytesIO, t.Optional[io.BytesIO]]]:
- """A context manager that sets up the isolation for invoking of a
- command line tool. This sets up stdin with the given input data
- and `os.environ` with the overrides from the given dictionary.
- This also rebinds some internals in Click to be mocked (like the
- prompt functionality).
-
- This is automatically done in the :meth:`invoke` method.
-
- :param input: the input stream to put into sys.stdin.
- :param env: the environment overrides as dictionary.
- :param color: whether the output should contain color codes. The
- application can still override this explicitly.
-
- .. versionchanged:: 8.0
- ``stderr`` is opened with ``errors="backslashreplace"``
- instead of the default ``"strict"``.
-
- .. versionchanged:: 4.0
- Added the ``color`` parameter.
- """
- bytes_input = make_input_stream(input, self.charset)
- echo_input = None
-
- old_stdin = sys.stdin
- old_stdout = sys.stdout
- old_stderr = sys.stderr
- old_forced_width = formatting.FORCED_WIDTH
- formatting.FORCED_WIDTH = 80
-
- env = self.make_env(env)
-
- bytes_output = io.BytesIO()
-
- if self.echo_stdin:
- bytes_input = echo_input = t.cast(
- t.BinaryIO, EchoingStdin(bytes_input, bytes_output)
- )
-
- sys.stdin = text_input = _NamedTextIOWrapper(
- bytes_input, encoding=self.charset, name="", mode="r"
- )
-
- if self.echo_stdin:
- # Force unbuffered reads, otherwise TextIOWrapper reads a
- # large chunk which is echoed early.
- text_input._CHUNK_SIZE = 1 # type: ignore
-
- sys.stdout = _NamedTextIOWrapper(
- bytes_output, encoding=self.charset, name="", mode="w"
- )
-
- bytes_error = None
- if self.mix_stderr:
- sys.stderr = sys.stdout
- else:
- bytes_error = io.BytesIO()
- sys.stderr = _NamedTextIOWrapper(
- bytes_error,
- encoding=self.charset,
- name="",
- mode="w",
- errors="backslashreplace",
- )
-
- @_pause_echo(echo_input) # type: ignore
- def visible_input(prompt: t.Optional[str] = None) -> str:
- sys.stdout.write(prompt or "")
- val = text_input.readline().rstrip("\r\n")
- sys.stdout.write(f"{val}\n")
- sys.stdout.flush()
- return val
-
- @_pause_echo(echo_input) # type: ignore
- def hidden_input(prompt: t.Optional[str] = None) -> str:
- sys.stdout.write(f"{prompt or ''}\n")
- sys.stdout.flush()
- return text_input.readline().rstrip("\r\n")
-
- @_pause_echo(echo_input) # type: ignore
- def _getchar(echo: bool) -> str:
- char = sys.stdin.read(1)
-
- if echo:
- sys.stdout.write(char)
-
- sys.stdout.flush()
- return char
-
- default_color = color
-
- def should_strip_ansi(
- stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None
- ) -> bool:
- if color is None:
- return not default_color
- return not color
-
- old_visible_prompt_func = termui.visible_prompt_func
- old_hidden_prompt_func = termui.hidden_prompt_func
- old__getchar_func = termui._getchar
- old_should_strip_ansi = utils.should_strip_ansi # type: ignore
- termui.visible_prompt_func = visible_input
- termui.hidden_prompt_func = hidden_input
- termui._getchar = _getchar
- utils.should_strip_ansi = should_strip_ansi # type: ignore
-
- old_env = {}
- try:
- for key, value in env.items():
- old_env[key] = os.environ.get(key)
- if value is None:
- try:
- del os.environ[key]
- except Exception:
- pass
- else:
- os.environ[key] = value
- yield (bytes_output, bytes_error)
- finally:
- for key, value in old_env.items():
- if value is None:
- try:
- del os.environ[key]
- except Exception:
- pass
- else:
- os.environ[key] = value
- sys.stdout = old_stdout
- sys.stderr = old_stderr
- sys.stdin = old_stdin
- termui.visible_prompt_func = old_visible_prompt_func
- termui.hidden_prompt_func = old_hidden_prompt_func
- termui._getchar = old__getchar_func
- utils.should_strip_ansi = old_should_strip_ansi # type: ignore
- formatting.FORCED_WIDTH = old_forced_width
-
- def invoke(
- self,
- cli: "BaseCommand",
- args: t.Optional[t.Union[str, t.Sequence[str]]] = None,
- input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]] = None,
- env: t.Optional[t.Mapping[str, t.Optional[str]]] = None,
- catch_exceptions: bool = True,
- color: bool = False,
- **extra: t.Any,
- ) -> Result:
- """Invokes a command in an isolated environment. The arguments are
- forwarded directly to the command line script, the `extra` keyword
- arguments are passed to the :meth:`~clickpkg.Command.main` function of
- the command.
-
- This returns a :class:`Result` object.
-
- :param cli: the command to invoke
- :param args: the arguments to invoke. It may be given as an iterable
- or a string. When given as a string it will be interpreted
- as a Unix shell command. More details at
- :func:`shlex.split`.
- :param input: the input data for `sys.stdin`.
- :param env: the environment overrides.
- :param catch_exceptions: Whether to catch any other exceptions than
- ``SystemExit``.
- :param extra: the keyword arguments to pass to :meth:`main`.
- :param color: whether the output should contain color codes. The
- application can still override this explicitly.
-
- .. versionchanged:: 8.0
- The result object has the ``return_value`` attribute with
- the value returned from the invoked command.
-
- .. versionchanged:: 4.0
- Added the ``color`` parameter.
-
- .. versionchanged:: 3.0
- Added the ``catch_exceptions`` parameter.
-
- .. versionchanged:: 3.0
- The result object has the ``exc_info`` attribute with the
- traceback if available.
- """
- exc_info = None
- with self.isolation(input=input, env=env, color=color) as outstreams:
- return_value = None
- exception: t.Optional[BaseException] = None
- exit_code = 0
-
- if isinstance(args, str):
- args = shlex.split(args)
-
- try:
- prog_name = extra.pop("prog_name")
- except KeyError:
- prog_name = self.get_default_prog_name(cli)
-
- try:
- return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
- except SystemExit as e:
- exc_info = sys.exc_info()
- e_code = t.cast(t.Optional[t.Union[int, t.Any]], e.code)
-
- if e_code is None:
- e_code = 0
-
- if e_code != 0:
- exception = e
-
- if not isinstance(e_code, int):
- sys.stdout.write(str(e_code))
- sys.stdout.write("\n")
- e_code = 1
-
- exit_code = e_code
-
- except Exception as e:
- if not catch_exceptions:
- raise
- exception = e
- exit_code = 1
- exc_info = sys.exc_info()
- finally:
- sys.stdout.flush()
- stdout = outstreams[0].getvalue()
- if self.mix_stderr:
- stderr = None
- else:
- stderr = outstreams[1].getvalue() # type: ignore
-
- return Result(
- runner=self,
- stdout_bytes=stdout,
- stderr_bytes=stderr,
- return_value=return_value,
- exit_code=exit_code,
- exception=exception,
- exc_info=exc_info, # type: ignore
- )
-
- @contextlib.contextmanager
- def isolated_filesystem(
- self, temp_dir: t.Optional[t.Union[str, "os.PathLike[str]"]] = None
- ) -> t.Iterator[str]:
- """A context manager that creates a temporary directory and
- changes the current working directory to it. This isolates tests
- that affect the contents of the CWD to prevent them from
- interfering with each other.
-
- :param temp_dir: Create the temporary directory under this
- directory. If given, the created directory is not removed
- when exiting.
-
- .. versionchanged:: 8.0
- Added the ``temp_dir`` parameter.
- """
- cwd = os.getcwd()
- dt = tempfile.mkdtemp(dir=temp_dir)
- os.chdir(dt)
-
- try:
- yield dt
- finally:
- os.chdir(cwd)
-
- if temp_dir is None:
- try:
- shutil.rmtree(dt)
- except OSError: # noqa: B014
- pass
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py
deleted file mode 100644
index b5b859fc501b7168051337ba2c16c0c0c8a12a4a..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py
+++ /dev/null
@@ -1,3833 +0,0 @@
-"""cffLib: read/write Adobe CFF fonts
-
-OpenType fonts with PostScript outlines contain a completely independent
-font file, Adobe's *Compact Font Format*. So dealing with OpenType fonts
-requires also dealing with CFF. This module allows you to read and write
-fonts written in the CFF format.
-
-In 2016, OpenType 1.8 introduced the CFF2
-format which, along with other changes, extended the CFF format to deal with
-the demands of variable fonts. This module parses both original CFF and CFF2.
-
-"""
-
-from fontTools.misc import sstruct
-from fontTools.misc import psCharStrings
-from fontTools.misc.arrayTools import unionRect, intRect
-from fontTools.misc.textTools import (
- bytechr,
- byteord,
- bytesjoin,
- tobytes,
- tostr,
- safeEval,
-)
-from fontTools.ttLib import TTFont
-from fontTools.ttLib.tables.otBase import OTTableWriter
-from fontTools.ttLib.tables.otBase import OTTableReader
-from fontTools.ttLib.tables import otTables as ot
-from io import BytesIO
-import struct
-import logging
-import re
-
-# mute cffLib debug messages when running ttx in verbose mode
-DEBUG = logging.DEBUG - 1
-log = logging.getLogger(__name__)
-
-cffHeaderFormat = """
- major: B
- minor: B
- hdrSize: B
-"""
-
-maxStackLimit = 513
-# maxstack operator has been deprecated. max stack is now always 513.
-
-
-class StopHintCountEvent(Exception):
- pass
-
-
-class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- stop_hintcount_ops = (
- "op_hintmask",
- "op_cntrmask",
- "op_rmoveto",
- "op_hmoveto",
- "op_vmoveto",
- )
-
- def __init__(self, localSubrs, globalSubrs, private=None):
- psCharStrings.SimpleT2Decompiler.__init__(
- self, localSubrs, globalSubrs, private
- )
-
- def execute(self, charString):
- self.need_hintcount = True # until proven otherwise
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, self.stop_hint_count)
-
- if hasattr(charString, "_desubroutinized"):
- # If a charstring has already been desubroutinized, we will still
- # need to execute it if we need to count hints in order to
- # compute the byte length for mask arguments, and haven't finished
- # counting hints pairs.
- if self.need_hintcount and self.callingStack:
- try:
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- except StopHintCountEvent:
- del self.callingStack[-1]
- return
-
- charString._patches = []
- psCharStrings.SimpleT2Decompiler.execute(self, charString)
- desubroutinized = charString.program[:]
- for idx, expansion in reversed(charString._patches):
- assert idx >= 2
- assert desubroutinized[idx - 1] in [
- "callsubr",
- "callgsubr",
- ], desubroutinized[idx - 1]
- assert type(desubroutinized[idx - 2]) == int
- if expansion[-1] == "return":
- expansion = expansion[:-1]
- desubroutinized[idx - 2 : idx] = expansion
- if not self.private.in_cff2:
- if "endchar" in desubroutinized:
- # Cut off after first endchar
- desubroutinized = desubroutinized[
- : desubroutinized.index("endchar") + 1
- ]
- else:
- if not len(desubroutinized) or desubroutinized[-1] != "return":
- desubroutinized.append("return")
-
- charString._desubroutinized = desubroutinized
- del charString._patches
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1] + self.localBias]
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def stop_hint_count(self, *args):
- self.need_hintcount = False
- for op_name in self.stop_hintcount_ops:
- setattr(self, op_name, None)
- cs = self.callingStack[-1]
- if hasattr(cs, "_desubroutinized"):
- raise StopHintCountEvent()
-
- def op_hintmask(self, index):
- psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
- if self.need_hintcount:
- self.stop_hint_count()
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- if not hasattr(cs, "_desubroutinized"):
- cs._patches.append((index, subr._desubroutinized))
-
-
-class CFFFontSet(object):
- """A CFF font "file" can contain more than one font, although this is
- extremely rare (and not allowed within OpenType fonts).
-
- This class is the entry point for parsing a CFF table. To actually
- manipulate the data inside the CFF font, you will want to access the
- ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
- object can either be treated as a dictionary (with appropriate
- ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
- objects, or as a list.
-
- .. code:: python
-
- from fontTools import ttLib
- tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
- tt["CFF "].cff
- #
- tt["CFF "].cff[0] # Here's your actual font data
- #
-
- """
-
- def decompile(self, file, otFont, isCFF2=None):
- """Parse a binary CFF file into an internal representation. ``file``
- should be a file handle object. ``otFont`` is the top-level
- :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
-
- If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
- library makes an assertion that the CFF header is of the appropriate
- version.
- """
-
- self.otFont = otFont
- sstruct.unpack(cffHeaderFormat, file.read(3), self)
- if isCFF2 is not None:
- # called from ttLib: assert 'major' as read from file matches the
- # expected version
- expected_major = 2 if isCFF2 else 1
- if self.major != expected_major:
- raise ValueError(
- "Invalid CFF 'major' version: expected %d, found %d"
- % (expected_major, self.major)
- )
- else:
- # use 'major' version from file to determine if isCFF2
- assert self.major in (1, 2), "Unknown CFF format"
- isCFF2 = self.major == 2
- if not isCFF2:
- self.offSize = struct.unpack("B", file.read(1))[0]
- file.seek(self.hdrSize)
- self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
- self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
- self.strings = IndexedStrings(file)
- else: # isCFF2
- self.topDictSize = struct.unpack(">H", file.read(2))[0]
- file.seek(self.hdrSize)
- self.fontNames = ["CFF2Font"]
- cff2GetGlyphOrder = otFont.getGlyphOrder
- # in CFF2, offsetSize is the size of the TopDict data.
- self.topDictIndex = TopDictIndex(
- file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2
- )
- self.strings = None
- self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
- self.topDictIndex.strings = self.strings
- self.topDictIndex.GlobalSubrs = self.GlobalSubrs
-
- def __len__(self):
- return len(self.fontNames)
-
- def keys(self):
- return list(self.fontNames)
-
- def values(self):
- return self.topDictIndex
-
- def __getitem__(self, nameOrIndex):
- """Return TopDict instance identified by name (str) or index (int
- or any object that implements `__index__`).
- """
- if hasattr(nameOrIndex, "__index__"):
- index = nameOrIndex.__index__()
- elif isinstance(nameOrIndex, str):
- name = nameOrIndex
- try:
- index = self.fontNames.index(name)
- except ValueError:
- raise KeyError(nameOrIndex)
- else:
- raise TypeError(nameOrIndex)
- return self.topDictIndex[index]
-
- def compile(self, file, otFont, isCFF2=None):
- """Write the object back into binary representation onto the given file.
- ``file`` should be a file handle object. ``otFont`` is the top-level
- :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
-
- If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
- library makes an assertion that the CFF header is of the appropriate
- version.
- """
- self.otFont = otFont
- if isCFF2 is not None:
- # called from ttLib: assert 'major' value matches expected version
- expected_major = 2 if isCFF2 else 1
- if self.major != expected_major:
- raise ValueError(
- "Invalid CFF 'major' version: expected %d, found %d"
- % (expected_major, self.major)
- )
- else:
- # use current 'major' value to determine output format
- assert self.major in (1, 2), "Unknown CFF format"
- isCFF2 = self.major == 2
-
- if otFont.recalcBBoxes and not isCFF2:
- for topDict in self.topDictIndex:
- topDict.recalcFontBBox()
-
- if not isCFF2:
- strings = IndexedStrings()
- else:
- strings = None
- writer = CFFWriter(isCFF2)
- topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
- if isCFF2:
- self.hdrSize = 5
- writer.add(sstruct.pack(cffHeaderFormat, self))
- # Note: topDictSize will most likely change in CFFWriter.toFile().
- self.topDictSize = topCompiler.getDataLength()
- writer.add(struct.pack(">H", self.topDictSize))
- else:
- self.hdrSize = 4
- self.offSize = 4 # will most likely change in CFFWriter.toFile().
- writer.add(sstruct.pack(cffHeaderFormat, self))
- writer.add(struct.pack("B", self.offSize))
- if not isCFF2:
- fontNames = Index()
- for name in self.fontNames:
- fontNames.append(name)
- writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
- writer.add(topCompiler)
- if not isCFF2:
- writer.add(strings.getCompiler())
- writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
-
- for topDict in self.topDictIndex:
- if not hasattr(topDict, "charset") or topDict.charset is None:
- charset = otFont.getGlyphOrder()
- topDict.charset = charset
- children = topCompiler.getChildren(strings)
- for child in children:
- writer.add(child)
-
- writer.toFile(file)
-
- def toXML(self, xmlWriter):
- """Write the object into XML representation onto the given
- :class:`fontTools.misc.xmlWriter.XMLWriter`.
-
- .. code:: python
-
- writer = xmlWriter.XMLWriter(sys.stdout)
- tt["CFF "].cff.toXML(writer)
-
- """
-
- xmlWriter.simpletag("major", value=self.major)
- xmlWriter.newline()
- xmlWriter.simpletag("minor", value=self.minor)
- xmlWriter.newline()
- for fontName in self.fontNames:
- xmlWriter.begintag("CFFFont", name=tostr(fontName))
- xmlWriter.newline()
- font = self[fontName]
- font.toXML(xmlWriter)
- xmlWriter.endtag("CFFFont")
- xmlWriter.newline()
- xmlWriter.newline()
- xmlWriter.begintag("GlobalSubrs")
- xmlWriter.newline()
- self.GlobalSubrs.toXML(xmlWriter)
- xmlWriter.endtag("GlobalSubrs")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, otFont=None):
- """Reads data from the XML element into the ``CFFFontSet`` object."""
- self.otFont = otFont
-
- # set defaults. These will be replaced if there are entries for them
- # in the XML file.
- if not hasattr(self, "major"):
- self.major = 1
- if not hasattr(self, "minor"):
- self.minor = 0
-
- if name == "CFFFont":
- if self.major == 1:
- if not hasattr(self, "offSize"):
- # this will be recalculated when the cff is compiled.
- self.offSize = 4
- if not hasattr(self, "hdrSize"):
- self.hdrSize = 4
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- if not hasattr(self, "fontNames"):
- self.fontNames = []
- self.topDictIndex = TopDictIndex()
- fontName = attrs["name"]
- self.fontNames.append(fontName)
- topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
- topDict.charset = None # gets filled in later
- elif self.major == 2:
- if not hasattr(self, "hdrSize"):
- self.hdrSize = 5
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- if not hasattr(self, "fontNames"):
- self.fontNames = ["CFF2Font"]
- cff2GetGlyphOrder = self.otFont.getGlyphOrder
- topDict = TopDict(
- GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder
- )
- self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
- self.topDictIndex.append(topDict)
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- topDict.fromXML(name, attrs, content)
-
- if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
- fdArray = topDict.FDArray
- for fontDict in fdArray:
- if hasattr(fontDict, "Private"):
- fontDict.Private.vstore = topDict.VarStore
-
- elif name == "GlobalSubrs":
- subrCharStringClass = psCharStrings.T2CharString
- if not hasattr(self, "GlobalSubrs"):
- self.GlobalSubrs = GlobalSubrsIndex()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- subr = subrCharStringClass()
- subr.fromXML(name, attrs, content)
- self.GlobalSubrs.append(subr)
- elif name == "major":
- self.major = int(attrs["value"])
- elif name == "minor":
- self.minor = int(attrs["value"])
-
- def convertCFFToCFF2(self, otFont):
- """Converts this object from CFF format to CFF2 format. This conversion
- is done 'in-place'. The conversion cannot be reversed.
-
- This assumes a decompiled CFF table. (i.e. that the object has been
- filled via :meth:`decompile`.)"""
- self.major = 2
- cff2GetGlyphOrder = self.otFont.getGlyphOrder
- topDictData = TopDictIndex(None, cff2GetGlyphOrder)
- topDictData.items = self.topDictIndex.items
- self.topDictIndex = topDictData
- topDict = topDictData[0]
- if hasattr(topDict, "Private"):
- privateDict = topDict.Private
- else:
- privateDict = None
- opOrder = buildOrder(topDictOperators2)
- topDict.order = opOrder
- topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
- for entry in topDictOperators:
- key = entry[1]
- if key not in opOrder:
- if key in topDict.rawDict:
- del topDict.rawDict[key]
- if hasattr(topDict, key):
- delattr(topDict, key)
-
- if not hasattr(topDict, "FDArray"):
- fdArray = topDict.FDArray = FDArrayIndex()
- fdArray.strings = None
- fdArray.GlobalSubrs = topDict.GlobalSubrs
- topDict.GlobalSubrs.fdArray = fdArray
- charStrings = topDict.CharStrings
- if charStrings.charStringsAreIndexed:
- charStrings.charStringsIndex.fdArray = fdArray
- else:
- charStrings.fdArray = fdArray
- fontDict = FontDict()
- fontDict.setCFF2(True)
- fdArray.append(fontDict)
- fontDict.Private = privateDict
- privateOpOrder = buildOrder(privateDictOperators2)
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- else:
- # clean up the PrivateDicts in the fdArray
- fdArray = topDict.FDArray
- privateOpOrder = buildOrder(privateDictOperators2)
- for fontDict in fdArray:
- fontDict.setCFF2(True)
- for key in fontDict.rawDict.keys():
- if key not in fontDict.order:
- del fontDict.rawDict[key]
- if hasattr(fontDict, key):
- delattr(fontDict, key)
-
- privateDict = fontDict.Private
- for entry in privateDictOperators:
- key = entry[1]
- if key not in privateOpOrder:
- if key in privateDict.rawDict:
- # print "Removing private dict", key
- del privateDict.rawDict[key]
- if hasattr(privateDict, key):
- delattr(privateDict, key)
- # print "Removing privateDict attr", key
- # At this point, the Subrs and Charstrings are all still T2Charstring class
- # easiest to fix this by compiling, then decompiling again
- file = BytesIO()
- self.compile(file, otFont, isCFF2=True)
- file.seek(0)
- self.decompile(file, otFont, isCFF2=True)
-
- def desubroutinize(self):
- for fontName in self.fontNames:
- font = self[fontName]
- cs = font.CharStrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DesubroutinizingT2Decompiler(
- subrs, c.globalSubrs, c.private
- )
- decompiler.execute(c)
- c.program = c._desubroutinized
- del c._desubroutinized
- # Delete all the local subrs
- if hasattr(font, "FDArray"):
- for fd in font.FDArray:
- pd = fd.Private
- if hasattr(pd, "Subrs"):
- del pd.Subrs
- if "Subrs" in pd.rawDict:
- del pd.rawDict["Subrs"]
- else:
- pd = font.Private
- if hasattr(pd, "Subrs"):
- del pd.Subrs
- if "Subrs" in pd.rawDict:
- del pd.rawDict["Subrs"]
- # as well as the global subrs
- self.GlobalSubrs.clear()
-
-
-class CFFWriter(object):
- """Helper class for serializing CFF data to binary. Used by
- :meth:`CFFFontSet.compile`."""
-
- def __init__(self, isCFF2):
- self.data = []
- self.isCFF2 = isCFF2
-
- def add(self, table):
- self.data.append(table)
-
- def toFile(self, file):
- lastPosList = None
- count = 1
- while True:
- log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
- count = count + 1
- pos = 0
- posList = [pos]
- for item in self.data:
- if hasattr(item, "getDataLength"):
- endPos = pos + item.getDataLength()
- if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
- self.topDictSize = item.getDataLength()
- else:
- endPos = pos + len(item)
- if hasattr(item, "setPos"):
- item.setPos(pos, endPos)
- pos = endPos
- posList.append(pos)
- if posList == lastPosList:
- break
- lastPosList = posList
- log.log(DEBUG, "CFFWriter.toFile() writing to file.")
- begin = file.tell()
- if self.isCFF2:
- self.data[1] = struct.pack(">H", self.topDictSize)
- else:
- self.offSize = calcOffSize(lastPosList[-1])
- self.data[1] = struct.pack("B", self.offSize)
- posList = [0]
- for item in self.data:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- file.write(item)
- posList.append(file.tell() - begin)
- assert posList == lastPosList
-
-
-def calcOffSize(largestOffset):
- if largestOffset < 0x100:
- offSize = 1
- elif largestOffset < 0x10000:
- offSize = 2
- elif largestOffset < 0x1000000:
- offSize = 3
- else:
- offSize = 4
- return offSize
-
-
-class IndexCompiler(object):
- """Base class for writing CFF `INDEX data `_
- to binary."""
-
- def __init__(self, items, strings, parent, isCFF2=None):
- if isCFF2 is None and hasattr(parent, "isCFF2"):
- isCFF2 = parent.isCFF2
- assert isCFF2 is not None
- self.isCFF2 = isCFF2
- self.items = self.getItems(items, strings)
- self.parent = parent
-
- def getItems(self, items, strings):
- return items
-
- def getOffsets(self):
- # An empty INDEX contains only the count field.
- if self.items:
- pos = 1
- offsets = [pos]
- for item in self.items:
- if hasattr(item, "getDataLength"):
- pos = pos + item.getDataLength()
- else:
- pos = pos + len(item)
- offsets.append(pos)
- else:
- offsets = []
- return offsets
-
- def getDataLength(self):
- if self.isCFF2:
- countSize = 4
- else:
- countSize = 2
-
- if self.items:
- lastOffset = self.getOffsets()[-1]
- offSize = calcOffSize(lastOffset)
- dataLength = (
- countSize
- + 1 # count
- + (len(self.items) + 1) * offSize # offSize
- + lastOffset # the offsets
- - 1 # size of object data
- )
- else:
- # count. For empty INDEX tables, this is the only entry.
- dataLength = countSize
-
- return dataLength
-
- def toFile(self, file):
- offsets = self.getOffsets()
- if self.isCFF2:
- writeCard32(file, len(self.items))
- else:
- writeCard16(file, len(self.items))
- # An empty INDEX contains only the count field.
- if self.items:
- offSize = calcOffSize(offsets[-1])
- writeCard8(file, offSize)
- offSize = -offSize
- pack = struct.pack
- for offset in offsets:
- binOffset = pack(">l", offset)[offSize:]
- assert len(binOffset) == -offSize
- file.write(binOffset)
- for item in self.items:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- data = tobytes(item, encoding="latin1")
- file.write(data)
-
-
-class IndexedStringsCompiler(IndexCompiler):
- def getItems(self, items, strings):
- return items.strings
-
-
-class TopDictIndexCompiler(IndexCompiler):
- """Helper class for writing the TopDict to binary."""
-
- def getItems(self, items, strings):
- out = []
- for item in items:
- out.append(item.getCompiler(strings, self))
- return out
-
- def getChildren(self, strings):
- children = []
- for topDict in self.items:
- children.extend(topDict.getChildren(strings))
- return children
-
- def getOffsets(self):
- if self.isCFF2:
- offsets = [0, self.items[0].getDataLength()]
- return offsets
- else:
- return super(TopDictIndexCompiler, self).getOffsets()
-
- def getDataLength(self):
- if self.isCFF2:
- dataLength = self.items[0].getDataLength()
- return dataLength
- else:
- return super(TopDictIndexCompiler, self).getDataLength()
-
- def toFile(self, file):
- if self.isCFF2:
- self.items[0].toFile(file)
- else:
- super(TopDictIndexCompiler, self).toFile(file)
-
-
-class FDArrayIndexCompiler(IndexCompiler):
- """Helper class for writing the
- Font DICT INDEX
- to binary."""
-
- def getItems(self, items, strings):
- out = []
- for item in items:
- out.append(item.getCompiler(strings, self))
- return out
-
- def getChildren(self, strings):
- children = []
- for fontDict in self.items:
- children.extend(fontDict.getChildren(strings))
- return children
-
- def toFile(self, file):
- offsets = self.getOffsets()
- if self.isCFF2:
- writeCard32(file, len(self.items))
- else:
- writeCard16(file, len(self.items))
- offSize = calcOffSize(offsets[-1])
- writeCard8(file, offSize)
- offSize = -offSize
- pack = struct.pack
- for offset in offsets:
- binOffset = pack(">l", offset)[offSize:]
- assert len(binOffset) == -offSize
- file.write(binOffset)
- for item in self.items:
- if hasattr(item, "toFile"):
- item.toFile(file)
- else:
- file.write(item)
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["FDArray"] = pos
-
-
-class GlobalSubrsCompiler(IndexCompiler):
- """Helper class for writing the `global subroutine INDEX `_
- to binary."""
-
- def getItems(self, items, strings):
- out = []
- for cs in items:
- cs.compile(self.isCFF2)
- out.append(cs.bytecode)
- return out
-
-
-class SubrsCompiler(GlobalSubrsCompiler):
- """Helper class for writing the `local subroutine INDEX `_
- to binary."""
-
- def setPos(self, pos, endPos):
- offset = pos - self.parent.pos
- self.parent.rawDict["Subrs"] = offset
-
-
-class CharStringsCompiler(GlobalSubrsCompiler):
- """Helper class for writing the `CharStrings INDEX `_
- to binary."""
-
- def getItems(self, items, strings):
- out = []
- for cs in items:
- cs.compile(self.isCFF2)
- out.append(cs.bytecode)
- return out
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["CharStrings"] = pos
-
-
-class Index(object):
- """This class represents what the CFF spec calls an INDEX (an array of
- variable-sized objects). `Index` items can be addressed and set using
- Python list indexing."""
-
- compilerClass = IndexCompiler
-
- def __init__(self, file=None, isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.items = []
- name = self.__class__.__name__
- if file is None:
- return
- self._isCFF2 = isCFF2
- log.log(DEBUG, "loading %s at %s", name, file.tell())
- self.file = file
- if isCFF2:
- count = readCard32(file)
- else:
- count = readCard16(file)
- if count == 0:
- return
- self.items = [None] * count
- offSize = readCard8(file)
- log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
- assert offSize <= 4, "offSize too large: %s" % offSize
- self.offsets = offsets = []
- pad = b"\0" * (4 - offSize)
- for index in range(count + 1):
- chunk = file.read(offSize)
- chunk = pad + chunk
- (offset,) = struct.unpack(">L", chunk)
- offsets.append(int(offset))
- self.offsetBase = file.tell() - 1
- file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot
- log.log(DEBUG, " end of %s at %s", name, file.tell())
-
- def __len__(self):
- return len(self.items)
-
- def __getitem__(self, index):
- item = self.items[index]
- if item is not None:
- return item
- offset = self.offsets[index] + self.offsetBase
- size = self.offsets[index + 1] - self.offsets[index]
- file = self.file
- file.seek(offset)
- data = file.read(size)
- assert len(data) == size
- item = self.produceItem(index, data, file, offset)
- self.items[index] = item
- return item
-
- def __setitem__(self, index, item):
- self.items[index] = item
-
- def produceItem(self, index, data, file, offset):
- return data
-
- def append(self, item):
- """Add an item to an INDEX."""
- self.items.append(item)
-
- def getCompiler(self, strings, parent, isCFF2=None):
- return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
-
- def clear(self):
- """Empty the INDEX."""
- del self.items[:]
-
-
-class GlobalSubrsIndex(Index):
- """This index contains all the global subroutines in the font. A global
- subroutine is a set of ``CharString`` data which is accessible to any
- glyph in the font, and is used to store repeated instructions - for
- example, components may be encoded as global subroutines, but so could
- hinting instructions.
-
- Remember that when interpreting a ``callgsubr`` instruction (or indeed
- a ``callsubr`` instruction) you will need to add the "subroutine
- number bias" to the number given:
-
- .. code:: python
-
- tt = ttLib.TTFont("Almendra-Bold.otf")
- u = tt["CFF "].cff[0].CharStrings["udieresis"]
- u.decompile()
-
- u.toXML(XMLWriter(sys.stdout))
- #
- # -64 callgsubr <-- Subroutine which implements the dieresis mark
- #
-
- tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
- #
-
- tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
- #
-
- ("The bias applied depends on the number of subrs (gsubrs). If the number of
- subrs (gsubrs) is less than 1240, the bias is 107. Otherwise if it is less
- than 33900, it is 1131; otherwise it is 32768.",
- Subroutine Operators)
- """
-
- compilerClass = GlobalSubrsCompiler
- subrClass = psCharStrings.T2CharString
- charStringClass = psCharStrings.T2CharString
-
- def __init__(
- self,
- file=None,
- globalSubrs=None,
- private=None,
- fdSelect=None,
- fdArray=None,
- isCFF2=None,
- ):
- super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
- self.globalSubrs = globalSubrs
- self.private = private
- if fdSelect:
- self.fdSelect = fdSelect
- if fdArray:
- self.fdArray = fdArray
-
- def produceItem(self, index, data, file, offset):
- if self.private is not None:
- private = self.private
- elif hasattr(self, "fdArray") and self.fdArray is not None:
- if hasattr(self, "fdSelect") and self.fdSelect is not None:
- fdIndex = self.fdSelect[index]
- else:
- fdIndex = 0
- private = self.fdArray[fdIndex].Private
- else:
- private = None
- return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)
-
- def toXML(self, xmlWriter):
- """Write the subroutines index into XML representation onto the given
- :class:`fontTools.misc.xmlWriter.XMLWriter`.
-
- .. code:: python
-
- writer = xmlWriter.XMLWriter(sys.stdout)
- tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
-
- """
- xmlWriter.comment(
- "The 'index' attribute is only for humans; " "it is ignored when parsed."
- )
- xmlWriter.newline()
- for i in range(len(self)):
- subr = self[i]
- if subr.needsDecompilation():
- xmlWriter.begintag("CharString", index=i, raw=1)
- else:
- xmlWriter.begintag("CharString", index=i)
- xmlWriter.newline()
- subr.toXML(xmlWriter)
- xmlWriter.endtag("CharString")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- if name != "CharString":
- return
- subr = self.subrClass()
- subr.fromXML(name, attrs, content)
- self.append(subr)
-
- def getItemAndSelector(self, index):
- sel = None
- if hasattr(self, "fdSelect"):
- sel = self.fdSelect[index]
- return self[index], sel
-
-
-class SubrsIndex(GlobalSubrsIndex):
- """This index contains a glyph's local subroutines. A local subroutine is a
- private set of ``CharString`` data which is accessible only to the glyph to
- which the index is attached."""
-
- compilerClass = SubrsCompiler
-
-
-class TopDictIndex(Index):
- """This index represents the array of ``TopDict`` structures in the font
- (again, usually only one entry is present). Hence the following calls are
- equivalent:
-
- .. code:: python
-
- tt["CFF "].cff[0]
- #
- tt["CFF "].cff.topDictIndex[0]
- #
-
- """
-
- compilerClass = TopDictIndexCompiler
-
- def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.cff2GetGlyphOrder = cff2GetGlyphOrder
- if file is not None and isCFF2:
- self._isCFF2 = isCFF2
- self.items = []
- name = self.__class__.__name__
- log.log(DEBUG, "loading %s at %s", name, file.tell())
- self.file = file
- count = 1
- self.items = [None] * count
- self.offsets = [0, topSize]
- self.offsetBase = file.tell()
- # pretend we've read the whole lot
- file.seek(self.offsetBase + topSize)
- log.log(DEBUG, " end of %s at %s", name, file.tell())
- else:
- super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)
-
- def produceItem(self, index, data, file, offset):
- top = TopDict(
- self.strings,
- file,
- offset,
- self.GlobalSubrs,
- self.cff2GetGlyphOrder,
- isCFF2=self._isCFF2,
- )
- top.decompile(data)
- return top
-
- def toXML(self, xmlWriter):
- for i in range(len(self)):
- xmlWriter.begintag("FontDict", index=i)
- xmlWriter.newline()
- self[i].toXML(xmlWriter)
- xmlWriter.endtag("FontDict")
- xmlWriter.newline()
-
-
-class FDArrayIndex(Index):
-
- compilerClass = FDArrayIndexCompiler
-
- def toXML(self, xmlWriter):
- for i in range(len(self)):
- xmlWriter.begintag("FontDict", index=i)
- xmlWriter.newline()
- self[i].toXML(xmlWriter)
- xmlWriter.endtag("FontDict")
- xmlWriter.newline()
-
- def produceItem(self, index, data, file, offset):
- fontDict = FontDict(
- self.strings,
- file,
- offset,
- self.GlobalSubrs,
- isCFF2=self._isCFF2,
- vstore=self.vstore,
- )
- fontDict.decompile(data)
- return fontDict
-
- def fromXML(self, name, attrs, content):
- if name != "FontDict":
- return
- fontDict = FontDict()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- fontDict.fromXML(name, attrs, content)
- self.append(fontDict)
-
-
-class VarStoreData(object):
- def __init__(self, file=None, otVarStore=None):
- self.file = file
- self.data = None
- self.otVarStore = otVarStore
- self.font = TTFont() # dummy font for the decompile function.
-
- def decompile(self):
- if self.file:
- # read data in from file. Assume position is correct.
- length = readCard16(self.file)
- self.data = self.file.read(length)
- globalState = {}
- reader = OTTableReader(self.data, globalState)
- self.otVarStore = ot.VarStore()
- self.otVarStore.decompile(reader, self.font)
- return self
-
- def compile(self):
- writer = OTTableWriter()
- self.otVarStore.compile(writer, self.font)
- # Note that this omits the initial Card16 length from the CFF2
- # VarStore data block
- self.data = writer.getAllData()
-
- def writeXML(self, xmlWriter, name):
- self.otVarStore.toXML(xmlWriter, self.font)
-
- def xmlRead(self, name, attrs, content, parent):
- self.otVarStore = ot.VarStore()
- for element in content:
- if isinstance(element, tuple):
- name, attrs, content = element
- self.otVarStore.fromXML(name, attrs, content, self.font)
- else:
- pass
- return None
-
- def __len__(self):
- return len(self.data)
-
- def getNumRegions(self, vsIndex):
- if vsIndex is None:
- vsIndex = 0
- varData = self.otVarStore.VarData[vsIndex]
- numRegions = varData.VarRegionCount
- return numRegions
-
-
-class FDSelect(object):
- def __init__(self, file=None, numGlyphs=None, format=None):
- if file:
- # read data in from file
- self.format = readCard8(file)
- if self.format == 0:
- from array import array
-
- self.gidArray = array("B", file.read(numGlyphs)).tolist()
- elif self.format == 3:
- gidArray = [None] * numGlyphs
- nRanges = readCard16(file)
- fd = None
- prev = None
- for i in range(nRanges):
- first = readCard16(file)
- if prev is not None:
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- prev = first
- fd = readCard8(file)
- if prev is not None:
- first = readCard16(file)
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- self.gidArray = gidArray
- elif self.format == 4:
- gidArray = [None] * numGlyphs
- nRanges = readCard32(file)
- fd = None
- prev = None
- for i in range(nRanges):
- first = readCard32(file)
- if prev is not None:
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- prev = first
- fd = readCard16(file)
- if prev is not None:
- first = readCard32(file)
- for glyphID in range(prev, first):
- gidArray[glyphID] = fd
- self.gidArray = gidArray
- else:
- assert False, "unsupported FDSelect format: %s" % format
- else:
- # reading from XML. Make empty gidArray, and leave format as passed in.
- # format is None will result in the smallest representation being used.
- self.format = format
- self.gidArray = []
-
- def __len__(self):
- return len(self.gidArray)
-
- def __getitem__(self, index):
- return self.gidArray[index]
-
- def __setitem__(self, index, fdSelectValue):
- self.gidArray[index] = fdSelectValue
-
- def append(self, fdSelectValue):
- self.gidArray.append(fdSelectValue)
-
-
-class CharStrings(object):
- """The ``CharStrings`` in the font represent the instructions for drawing
- each glyph. This object presents a dictionary interface to the font's
- CharStrings, indexed by glyph name:
-
- .. code:: python
-
- tt["CFF "].cff[0].CharStrings["a"]
- #
-
- See :class:`fontTools.misc.psCharStrings.T1CharString` and
- :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
- compile and interpret the glyph drawing instructions in the returned objects.
-
- """
-
- def __init__(
- self,
- file,
- charset,
- globalSubrs,
- private,
- fdSelect,
- fdArray,
- isCFF2=None,
- varStore=None,
- ):
- self.globalSubrs = globalSubrs
- self.varStore = varStore
- if file is not None:
- self.charStringsIndex = SubrsIndex(
- file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2
- )
- self.charStrings = charStrings = {}
- for i in range(len(charset)):
- charStrings[charset[i]] = i
- # read from OTF file: charStrings.values() are indices into
- # charStringsIndex.
- self.charStringsAreIndexed = 1
- else:
- self.charStrings = {}
- # read from ttx file: charStrings.values() are actual charstrings
- self.charStringsAreIndexed = 0
- self.private = private
- if fdSelect is not None:
- self.fdSelect = fdSelect
- if fdArray is not None:
- self.fdArray = fdArray
-
- def keys(self):
- return list(self.charStrings.keys())
-
- def values(self):
- if self.charStringsAreIndexed:
- return self.charStringsIndex
- else:
- return list(self.charStrings.values())
-
- def has_key(self, name):
- return name in self.charStrings
-
- __contains__ = has_key
-
- def __len__(self):
- return len(self.charStrings)
-
- def __getitem__(self, name):
- charString = self.charStrings[name]
- if self.charStringsAreIndexed:
- charString = self.charStringsIndex[charString]
- return charString
-
- def __setitem__(self, name, charString):
- if self.charStringsAreIndexed:
- index = self.charStrings[name]
- self.charStringsIndex[index] = charString
- else:
- self.charStrings[name] = charString
-
- def getItemAndSelector(self, name):
- if self.charStringsAreIndexed:
- index = self.charStrings[name]
- return self.charStringsIndex.getItemAndSelector(index)
- else:
- if hasattr(self, "fdArray"):
- if hasattr(self, "fdSelect"):
- sel = self.charStrings[name].fdSelectIndex
- else:
- sel = 0
- else:
- sel = None
- return self.charStrings[name], sel
-
- def toXML(self, xmlWriter):
- names = sorted(self.keys())
- for name in names:
- charStr, fdSelectIndex = self.getItemAndSelector(name)
- if charStr.needsDecompilation():
- raw = [("raw", 1)]
- else:
- raw = []
- if fdSelectIndex is None:
- xmlWriter.begintag("CharString", [("name", name)] + raw)
- else:
- xmlWriter.begintag(
- "CharString",
- [("name", name), ("fdSelectIndex", fdSelectIndex)] + raw,
- )
- xmlWriter.newline()
- charStr.toXML(xmlWriter)
- xmlWriter.endtag("CharString")
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- if name != "CharString":
- continue
- fdID = -1
- if hasattr(self, "fdArray"):
- try:
- fdID = safeEval(attrs["fdSelectIndex"])
- except KeyError:
- fdID = 0
- private = self.fdArray[fdID].Private
- else:
- private = self.private
-
- glyphName = attrs["name"]
- charStringClass = psCharStrings.T2CharString
- charString = charStringClass(private=private, globalSubrs=self.globalSubrs)
- charString.fromXML(name, attrs, content)
- if fdID >= 0:
- charString.fdSelectIndex = fdID
- self[glyphName] = charString
-
-
-def readCard8(file):
- return byteord(file.read(1))
-
-
-def readCard16(file):
- (value,) = struct.unpack(">H", file.read(2))
- return value
-
-
-def readCard32(file):
- (value,) = struct.unpack(">L", file.read(4))
- return value
-
-
-def writeCard8(file, value):
- file.write(bytechr(value))
-
-
-def writeCard16(file, value):
- file.write(struct.pack(">H", value))
-
-
-def writeCard32(file, value):
- file.write(struct.pack(">L", value))
-
-
-def packCard8(value):
- return bytechr(value)
-
-
-def packCard16(value):
- return struct.pack(">H", value)
-
-
-def packCard32(value):
- return struct.pack(">L", value)
-
-
-def buildOperatorDict(table):
- d = {}
- for op, name, arg, default, conv in table:
- d[op] = (name, arg)
- return d
-
-
-def buildOpcodeDict(table):
- d = {}
- for op, name, arg, default, conv in table:
- if isinstance(op, tuple):
- op = bytechr(op[0]) + bytechr(op[1])
- else:
- op = bytechr(op)
- d[name] = (op, arg)
- return d
-
-
-def buildOrder(table):
- l = []
- for op, name, arg, default, conv in table:
- l.append(name)
- return l
-
-
-def buildDefaults(table):
- d = {}
- for op, name, arg, default, conv in table:
- if default is not None:
- d[name] = default
- return d
-
-
-def buildConverters(table):
- d = {}
- for op, name, arg, default, conv in table:
- d[name] = conv
- return d
-
-
-class SimpleConverter(object):
- def read(self, parent, value):
- if not hasattr(parent, "file"):
- return self._read(parent, value)
- file = parent.file
- pos = file.tell()
- try:
- return self._read(parent, value)
- finally:
- file.seek(pos)
-
- def _read(self, parent, value):
- return value
-
- def write(self, parent, value):
- return value
-
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return attrs["value"]
-
-
-class ASCIIConverter(SimpleConverter):
- def _read(self, parent, value):
- return tostr(value, encoding="ascii")
-
- def write(self, parent, value):
- return tobytes(value, encoding="ascii")
-
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, value=tostr(value, encoding="ascii"))
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return tobytes(attrs["value"], encoding=("ascii"))
-
-
-class Latin1Converter(SimpleConverter):
- def _read(self, parent, value):
- return tostr(value, encoding="latin1")
-
- def write(self, parent, value):
- return tobytes(value, encoding="latin1")
-
- def xmlWrite(self, xmlWriter, name, value):
- value = tostr(value, encoding="latin1")
- if name in ["Notice", "Copyright"]:
- value = re.sub(r"[\r\n]\s+", " ", value)
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return tobytes(attrs["value"], encoding=("latin1"))
-
-
-def parseNum(s):
- try:
- value = int(s)
- except:
- value = float(s)
- return value
-
-
-def parseBlendList(s):
- valueList = []
- for element in s:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- blendList = attrs["value"].split()
- blendList = [eval(val) for val in blendList]
- valueList.append(blendList)
- if len(valueList) == 1:
- valueList = valueList[0]
- return valueList
-
-
-class NumberConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- if isinstance(value, list):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- xmlWriter.indent()
- blendValue = " ".join([str(val) for val in value])
- xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
- xmlWriter.newline()
- xmlWriter.dedent()
- xmlWriter.endtag(name)
- xmlWriter.newline()
- else:
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- valueString = attrs.get("value", None)
- if valueString is None:
- value = parseBlendList(content)
- else:
- value = parseNum(attrs["value"])
- return value
-
-
-class ArrayConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- if value and isinstance(value[0], list):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- xmlWriter.indent()
- for valueList in value:
- blendValue = " ".join([str(val) for val in valueList])
- xmlWriter.simpletag(kBlendDictOpName, value=blendValue)
- xmlWriter.newline()
- xmlWriter.dedent()
- xmlWriter.endtag(name)
- xmlWriter.newline()
- else:
- value = " ".join([str(val) for val in value])
- xmlWriter.simpletag(name, value=value)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- valueString = attrs.get("value", None)
- if valueString is None:
- valueList = parseBlendList(content)
- else:
- values = valueString.split()
- valueList = [parseNum(value) for value in values]
- return valueList
-
-
-class TableConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.begintag(name)
- xmlWriter.newline()
- value.toXML(xmlWriter)
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- ob = self.getClass()()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- ob.fromXML(name, attrs, content)
- return ob
-
-
-class PrivateDictConverter(TableConverter):
- def getClass(self):
- return PrivateDict
-
- def _read(self, parent, value):
- size, offset = value
- file = parent.file
- isCFF2 = parent._isCFF2
- try:
- vstore = parent.vstore
- except AttributeError:
- vstore = None
- priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
- file.seek(offset)
- data = file.read(size)
- assert len(data) == size
- priv.decompile(data)
- return priv
-
- def write(self, parent, value):
- return (0, 0) # dummy value
-
-
-class SubrsConverter(TableConverter):
- def getClass(self):
- return SubrsIndex
-
- def _read(self, parent, value):
- file = parent.file
- isCFF2 = parent._isCFF2
- file.seek(parent.offset + value) # Offset(self)
- return SubrsIndex(file, isCFF2=isCFF2)
-
- def write(self, parent, value):
- return 0 # dummy value
-
-
-class CharStringsConverter(TableConverter):
- def _read(self, parent, value):
- file = parent.file
- isCFF2 = parent._isCFF2
- charset = parent.charset
- varStore = getattr(parent, "VarStore", None)
- globalSubrs = parent.GlobalSubrs
- if hasattr(parent, "FDArray"):
- fdArray = parent.FDArray
- if hasattr(parent, "FDSelect"):
- fdSelect = parent.FDSelect
- else:
- fdSelect = None
- private = None
- else:
- fdSelect, fdArray = None, None
- private = parent.Private
- file.seek(value) # Offset(0)
- charStrings = CharStrings(
- file,
- charset,
- globalSubrs,
- private,
- fdSelect,
- fdArray,
- isCFF2=isCFF2,
- varStore=varStore,
- )
- return charStrings
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlRead(self, name, attrs, content, parent):
- if hasattr(parent, "FDArray"):
- # if it is a CID-keyed font, then the private Dict is extracted from the
- # parent.FDArray
- fdArray = parent.FDArray
- if hasattr(parent, "FDSelect"):
- fdSelect = parent.FDSelect
- else:
- fdSelect = None
- private = None
- else:
- # if it is a name-keyed font, then the private dict is in the top dict,
- # and
- # there is no fdArray.
- private, fdSelect, fdArray = parent.Private, None, None
- charStrings = CharStrings(
- None,
- None,
- parent.GlobalSubrs,
- private,
- fdSelect,
- fdArray,
- varStore=getattr(parent, "VarStore", None),
- )
- charStrings.fromXML(name, attrs, content)
- return charStrings
-
-
-class CharsetConverter(SimpleConverter):
- def _read(self, parent, value):
- isCID = hasattr(parent, "ROS")
- if value > 2:
- numGlyphs = parent.numGlyphs
- file = parent.file
- file.seek(value)
- log.log(DEBUG, "loading charset at %s", value)
- format = readCard8(file)
- if format == 0:
- charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
- elif format == 1 or format == 2:
- charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
- else:
- raise NotImplementedError
- assert len(charset) == numGlyphs
- log.log(DEBUG, " charset end at %s", file.tell())
- # make sure glyph names are unique
- allNames = {}
- newCharset = []
- for glyphName in charset:
- if glyphName in allNames:
- # make up a new glyphName that's unique
- n = allNames[glyphName]
- while (glyphName + "#" + str(n)) in allNames:
- n += 1
- allNames[glyphName] = n + 1
- glyphName = glyphName + "#" + str(n)
- allNames[glyphName] = 1
- newCharset.append(glyphName)
- charset = newCharset
- else: # offset == 0 -> no charset data.
- if isCID or "CharStrings" not in parent.rawDict:
- # We get here only when processing fontDicts from the FDArray of
- # CFF-CID fonts. Only the real topDict references the charset.
- assert value == 0
- charset = None
- elif value == 0:
- charset = cffISOAdobeStrings
- elif value == 1:
- charset = cffIExpertStrings
- elif value == 2:
- charset = cffExpertSubsetStrings
- if charset and (len(charset) != parent.numGlyphs):
- charset = charset[: parent.numGlyphs]
- return charset
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlWrite(self, xmlWriter, name, value):
- # XXX only write charset when not in OT/TTX context, where we
- # dump charset as a separate "GlyphOrder" table.
- # # xmlWriter.simpletag("charset")
- xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- pass
-
-
-class CharsetCompiler(object):
- def __init__(self, strings, charset, parent):
- assert charset[0] == ".notdef"
- isCID = hasattr(parent.dictObj, "ROS")
- data0 = packCharset0(charset, isCID, strings)
- data = packCharset(charset, isCID, strings)
- if len(data) < len(data0):
- self.data = data
- else:
- self.data = data0
- self.parent = parent
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["charset"] = pos
-
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
-
-
-def getStdCharSet(charset):
- # check to see if we can use a predefined charset value.
- predefinedCharSetVal = None
- predefinedCharSets = [
- (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
- (cffExpertStringCount, cffIExpertStrings, 1),
- (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
- ]
- lcs = len(charset)
- for cnt, pcs, csv in predefinedCharSets:
- if predefinedCharSetVal is not None:
- break
- if lcs > cnt:
- continue
- predefinedCharSetVal = csv
- for i in range(lcs):
- if charset[i] != pcs[i]:
- predefinedCharSetVal = None
- break
- return predefinedCharSetVal
-
-
-def getCIDfromName(name, strings):
- return int(name[3:])
-
-
-def getSIDfromName(name, strings):
- return strings.getSID(name)
-
-
-def packCharset0(charset, isCID, strings):
- fmt = 0
- data = [packCard8(fmt)]
- if isCID:
- getNameID = getCIDfromName
- else:
- getNameID = getSIDfromName
-
- for name in charset[1:]:
- data.append(packCard16(getNameID(name, strings)))
- return bytesjoin(data)
-
-
-def packCharset(charset, isCID, strings):
- fmt = 1
- ranges = []
- first = None
- end = 0
- if isCID:
- getNameID = getCIDfromName
- else:
- getNameID = getSIDfromName
-
- for name in charset[1:]:
- SID = getNameID(name, strings)
- if first is None:
- first = SID
- elif end + 1 != SID:
- nLeft = end - first
- if nLeft > 255:
- fmt = 2
- ranges.append((first, nLeft))
- first = SID
- end = SID
- if end:
- nLeft = end - first
- if nLeft > 255:
- fmt = 2
- ranges.append((first, nLeft))
-
- data = [packCard8(fmt)]
- if fmt == 1:
- nLeftFunc = packCard8
- else:
- nLeftFunc = packCard16
- for first, nLeft in ranges:
- data.append(packCard16(first) + nLeftFunc(nLeft))
- return bytesjoin(data)
-
-
-def parseCharset0(numGlyphs, file, strings, isCID):
- charset = [".notdef"]
- if isCID:
- for i in range(numGlyphs - 1):
- CID = readCard16(file)
- charset.append("cid" + str(CID).zfill(5))
- else:
- for i in range(numGlyphs - 1):
- SID = readCard16(file)
- charset.append(strings[SID])
- return charset
-
-
-def parseCharset(numGlyphs, file, strings, isCID, fmt):
- charset = [".notdef"]
- count = 1
- if fmt == 1:
- nLeftFunc = readCard8
- else:
- nLeftFunc = readCard16
- while count < numGlyphs:
- first = readCard16(file)
- nLeft = nLeftFunc(file)
- if isCID:
- for CID in range(first, first + nLeft + 1):
- charset.append("cid" + str(CID).zfill(5))
- else:
- for SID in range(first, first + nLeft + 1):
- charset.append(strings[SID])
- count = count + nLeft + 1
- return charset
-
-
-class EncodingCompiler(object):
- def __init__(self, strings, encoding, parent):
- assert not isinstance(encoding, str)
- data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
- data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
- if len(data0) < len(data1):
- self.data = data0
- else:
- self.data = data1
- self.parent = parent
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["Encoding"] = pos
-
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
-
-
-class EncodingConverter(SimpleConverter):
- def _read(self, parent, value):
- if value == 0:
- return "StandardEncoding"
- elif value == 1:
- return "ExpertEncoding"
- else:
- assert value > 1
- file = parent.file
- file.seek(value)
- log.log(DEBUG, "loading Encoding at %s", value)
- fmt = readCard8(file)
- haveSupplement = fmt & 0x80
- if haveSupplement:
- raise NotImplementedError("Encoding supplements are not yet supported")
- fmt = fmt & 0x7F
- if fmt == 0:
- encoding = parseEncoding0(
- parent.charset, file, haveSupplement, parent.strings
- )
- elif fmt == 1:
- encoding = parseEncoding1(
- parent.charset, file, haveSupplement, parent.strings
- )
- return encoding
-
- def write(self, parent, value):
- if value == "StandardEncoding":
- return 0
- elif value == "ExpertEncoding":
- return 1
- return 0 # dummy value
-
- def xmlWrite(self, xmlWriter, name, value):
- if value in ("StandardEncoding", "ExpertEncoding"):
- xmlWriter.simpletag(name, name=value)
- xmlWriter.newline()
- return
- xmlWriter.begintag(name)
- xmlWriter.newline()
- for code in range(len(value)):
- glyphName = value[code]
- if glyphName != ".notdef":
- xmlWriter.simpletag("map", code=hex(code), name=glyphName)
- xmlWriter.newline()
- xmlWriter.endtag(name)
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- if "name" in attrs:
- return attrs["name"]
- encoding = [".notdef"] * 256
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- code = safeEval(attrs["code"])
- glyphName = attrs["name"]
- encoding[code] = glyphName
- return encoding
-
-
-def parseEncoding0(charset, file, haveSupplement, strings):
- nCodes = readCard8(file)
- encoding = [".notdef"] * 256
- for glyphID in range(1, nCodes + 1):
- code = readCard8(file)
- if code != 0:
- encoding[code] = charset[glyphID]
- return encoding
-
-
-def parseEncoding1(charset, file, haveSupplement, strings):
- nRanges = readCard8(file)
- encoding = [".notdef"] * 256
- glyphID = 1
- for i in range(nRanges):
- code = readCard8(file)
- nLeft = readCard8(file)
- for glyphID in range(glyphID, glyphID + nLeft + 1):
- encoding[code] = charset[glyphID]
- code = code + 1
- glyphID = glyphID + 1
- return encoding
-
-
-def packEncoding0(charset, encoding, strings):
- fmt = 0
- m = {}
- for code in range(len(encoding)):
- name = encoding[code]
- if name != ".notdef":
- m[name] = code
- codes = []
- for name in charset[1:]:
- code = m.get(name)
- codes.append(code)
-
- while codes and codes[-1] is None:
- codes.pop()
-
- data = [packCard8(fmt), packCard8(len(codes))]
- for code in codes:
- if code is None:
- code = 0
- data.append(packCard8(code))
- return bytesjoin(data)
-
-
-def packEncoding1(charset, encoding, strings):
- fmt = 1
- m = {}
- for code in range(len(encoding)):
- name = encoding[code]
- if name != ".notdef":
- m[name] = code
- ranges = []
- first = None
- end = 0
- for name in charset[1:]:
- code = m.get(name, -1)
- if first is None:
- first = code
- elif end + 1 != code:
- nLeft = end - first
- ranges.append((first, nLeft))
- first = code
- end = code
- nLeft = end - first
- ranges.append((first, nLeft))
-
- # remove unencoded glyphs at the end.
- while ranges and ranges[-1][0] == -1:
- ranges.pop()
-
- data = [packCard8(fmt), packCard8(len(ranges))]
- for first, nLeft in ranges:
- if first == -1: # unencoded
- first = 0
- data.append(packCard8(first) + packCard8(nLeft))
- return bytesjoin(data)
-
-
-class FDArrayConverter(TableConverter):
- def _read(self, parent, value):
- try:
- vstore = parent.VarStore
- except AttributeError:
- vstore = None
- file = parent.file
- isCFF2 = parent._isCFF2
- file.seek(value)
- fdArray = FDArrayIndex(file, isCFF2=isCFF2)
- fdArray.vstore = vstore
- fdArray.strings = parent.strings
- fdArray.GlobalSubrs = parent.GlobalSubrs
- return fdArray
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlRead(self, name, attrs, content, parent):
- fdArray = FDArrayIndex()
- for element in content:
- if isinstance(element, str):
- continue
- name, attrs, content = element
- fdArray.fromXML(name, attrs, content)
- return fdArray
-
-
-class FDSelectConverter(SimpleConverter):
- def _read(self, parent, value):
- file = parent.file
- file.seek(value)
- fdSelect = FDSelect(file, parent.numGlyphs)
- return fdSelect
-
- def write(self, parent, value):
- return 0 # dummy value
-
- # The FDSelect glyph data is written out to XML in the charstring keys,
- # so we write out only the format selector
- def xmlWrite(self, xmlWriter, name, value):
- xmlWriter.simpletag(name, [("format", value.format)])
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- fmt = safeEval(attrs["format"])
- file = None
- numGlyphs = None
- fdSelect = FDSelect(file, numGlyphs, fmt)
- return fdSelect
-
-
-class VarStoreConverter(SimpleConverter):
- def _read(self, parent, value):
- file = parent.file
- file.seek(value)
- varStore = VarStoreData(file)
- varStore.decompile()
- return varStore
-
- def write(self, parent, value):
- return 0 # dummy value
-
- def xmlWrite(self, xmlWriter, name, value):
- value.writeXML(xmlWriter, name)
-
- def xmlRead(self, name, attrs, content, parent):
- varStore = VarStoreData()
- varStore.xmlRead(name, attrs, content, parent)
- return varStore
-
-
-def packFDSelect0(fdSelectArray):
- fmt = 0
- data = [packCard8(fmt)]
- for index in fdSelectArray:
- data.append(packCard8(index))
- return bytesjoin(data)
-
-
-def packFDSelect3(fdSelectArray):
- fmt = 3
- fdRanges = []
- lenArray = len(fdSelectArray)
- lastFDIndex = -1
- for i in range(lenArray):
- fdIndex = fdSelectArray[i]
- if lastFDIndex != fdIndex:
- fdRanges.append([i, fdIndex])
- lastFDIndex = fdIndex
- sentinelGID = i + 1
-
- data = [packCard8(fmt)]
- data.append(packCard16(len(fdRanges)))
- for fdRange in fdRanges:
- data.append(packCard16(fdRange[0]))
- data.append(packCard8(fdRange[1]))
- data.append(packCard16(sentinelGID))
- return bytesjoin(data)
-
-
-def packFDSelect4(fdSelectArray):
- fmt = 4
- fdRanges = []
- lenArray = len(fdSelectArray)
- lastFDIndex = -1
- for i in range(lenArray):
- fdIndex = fdSelectArray[i]
- if lastFDIndex != fdIndex:
- fdRanges.append([i, fdIndex])
- lastFDIndex = fdIndex
- sentinelGID = i + 1
-
- data = [packCard8(fmt)]
- data.append(packCard32(len(fdRanges)))
- for fdRange in fdRanges:
- data.append(packCard32(fdRange[0]))
- data.append(packCard16(fdRange[1]))
- data.append(packCard32(sentinelGID))
- return bytesjoin(data)
-
-
-class FDSelectCompiler(object):
- def __init__(self, fdSelect, parent):
- fmt = fdSelect.format
- fdSelectArray = fdSelect.gidArray
- if fmt == 0:
- self.data = packFDSelect0(fdSelectArray)
- elif fmt == 3:
- self.data = packFDSelect3(fdSelectArray)
- elif fmt == 4:
- self.data = packFDSelect4(fdSelectArray)
- else:
- # choose smaller of the two formats
- data0 = packFDSelect0(fdSelectArray)
- data3 = packFDSelect3(fdSelectArray)
- if len(data0) < len(data3):
- self.data = data0
- fdSelect.format = 0
- else:
- self.data = data3
- fdSelect.format = 3
-
- self.parent = parent
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["FDSelect"] = pos
-
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
-
-
-class VarStoreCompiler(object):
- def __init__(self, varStoreData, parent):
- self.parent = parent
- if not varStoreData.data:
- varStoreData.compile()
- data = [packCard16(len(varStoreData.data)), varStoreData.data]
- self.data = bytesjoin(data)
-
- def setPos(self, pos, endPos):
- self.parent.rawDict["VarStore"] = pos
-
- def getDataLength(self):
- return len(self.data)
-
- def toFile(self, file):
- file.write(self.data)
-
-
-class ROSConverter(SimpleConverter):
- def xmlWrite(self, xmlWriter, name, value):
- registry, order, supplement = value
- xmlWriter.simpletag(
- name,
- [
- ("Registry", tostr(registry)),
- ("Order", tostr(order)),
- ("Supplement", supplement),
- ],
- )
- xmlWriter.newline()
-
- def xmlRead(self, name, attrs, content, parent):
- return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"]))
-
-
-topDictOperators = [
- # opcode name argument type default converter
- (25, "maxstack", "number", None, None),
- ((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()),
- ((12, 20), "SyntheticBase", "number", None, None),
- (0, "version", "SID", None, None),
- (1, "Notice", "SID", None, Latin1Converter()),
- ((12, 0), "Copyright", "SID", None, Latin1Converter()),
- (2, "FullName", "SID", None, Latin1Converter()),
- ((12, 38), "FontName", "SID", None, Latin1Converter()),
- (3, "FamilyName", "SID", None, Latin1Converter()),
- (4, "Weight", "SID", None, None),
- ((12, 1), "isFixedPitch", "number", 0, None),
- ((12, 2), "ItalicAngle", "number", 0, None),
- ((12, 3), "UnderlinePosition", "number", -100, None),
- ((12, 4), "UnderlineThickness", "number", 50, None),
- ((12, 5), "PaintType", "number", 0, None),
- ((12, 6), "CharstringType", "number", 2, None),
- ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
- (13, "UniqueID", "number", None, None),
- (5, "FontBBox", "array", [0, 0, 0, 0], None),
- ((12, 8), "StrokeWidth", "number", 0, None),
- (14, "XUID", "array", None, None),
- ((12, 21), "PostScript", "SID", None, None),
- ((12, 22), "BaseFontName", "SID", None, None),
- ((12, 23), "BaseFontBlend", "delta", None, None),
- ((12, 31), "CIDFontVersion", "number", 0, None),
- ((12, 32), "CIDFontRevision", "number", 0, None),
- ((12, 33), "CIDFontType", "number", 0, None),
- ((12, 34), "CIDCount", "number", 8720, None),
- (15, "charset", "number", None, CharsetConverter()),
- ((12, 35), "UIDBase", "number", None, None),
- (16, "Encoding", "number", 0, EncodingConverter()),
- (18, "Private", ("number", "number"), None, PrivateDictConverter()),
- ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
- ((12, 36), "FDArray", "number", None, FDArrayConverter()),
- (17, "CharStrings", "number", None, CharStringsConverter()),
- (24, "VarStore", "number", None, VarStoreConverter()),
-]
-
-topDictOperators2 = [
- # opcode name argument type default converter
- (25, "maxstack", "number", None, None),
- ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
- ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
- ((12, 36), "FDArray", "number", None, FDArrayConverter()),
- (17, "CharStrings", "number", None, CharStringsConverter()),
- (24, "VarStore", "number", None, VarStoreConverter()),
-]
-
-# Note! FDSelect and FDArray must both precede CharStrings in the output XML build order,
-# in order for the font to compile back from xml.
-
-kBlendDictOpName = "blend"
-blendOp = 23
-
-privateDictOperators = [
- # opcode name argument type default converter
- (22, "vsindex", "number", None, None),
- (
- blendOp,
- kBlendDictOpName,
- "blendList",
- None,
- None,
- ), # This is for reading to/from XML: it is not written to CFF.
- (6, "BlueValues", "delta", None, None),
- (7, "OtherBlues", "delta", None, None),
- (8, "FamilyBlues", "delta", None, None),
- (9, "FamilyOtherBlues", "delta", None, None),
- ((12, 9), "BlueScale", "number", 0.039625, None),
- ((12, 10), "BlueShift", "number", 7, None),
- ((12, 11), "BlueFuzz", "number", 1, None),
- (10, "StdHW", "number", None, None),
- (11, "StdVW", "number", None, None),
- ((12, 12), "StemSnapH", "delta", None, None),
- ((12, 13), "StemSnapV", "delta", None, None),
- ((12, 14), "ForceBold", "number", 0, None),
- ((12, 15), "ForceBoldThreshold", "number", None, None), # deprecated
- ((12, 16), "lenIV", "number", None, None), # deprecated
- ((12, 17), "LanguageGroup", "number", 0, None),
- ((12, 18), "ExpansionFactor", "number", 0.06, None),
- ((12, 19), "initialRandomSeed", "number", 0, None),
- (20, "defaultWidthX", "number", 0, None),
- (21, "nominalWidthX", "number", 0, None),
- (19, "Subrs", "number", None, SubrsConverter()),
-]
-
-privateDictOperators2 = [
- # opcode name argument type default converter
- (22, "vsindex", "number", None, None),
- (
- blendOp,
- kBlendDictOpName,
- "blendList",
- None,
- None,
- ), # This is for reading to/from XML: it is not written to CFF.
- (6, "BlueValues", "delta", None, None),
- (7, "OtherBlues", "delta", None, None),
- (8, "FamilyBlues", "delta", None, None),
- (9, "FamilyOtherBlues", "delta", None, None),
- ((12, 9), "BlueScale", "number", 0.039625, None),
- ((12, 10), "BlueShift", "number", 7, None),
- ((12, 11), "BlueFuzz", "number", 1, None),
- (10, "StdHW", "number", None, None),
- (11, "StdVW", "number", None, None),
- ((12, 12), "StemSnapH", "delta", None, None),
- ((12, 13), "StemSnapV", "delta", None, None),
- ((12, 17), "LanguageGroup", "number", 0, None),
- ((12, 18), "ExpansionFactor", "number", 0.06, None),
- (19, "Subrs", "number", None, SubrsConverter()),
-]
-
-
-def addConverters(table):
- for i in range(len(table)):
- op, name, arg, default, conv = table[i]
- if conv is not None:
- continue
- if arg in ("delta", "array"):
- conv = ArrayConverter()
- elif arg == "number":
- conv = NumberConverter()
- elif arg == "SID":
- conv = ASCIIConverter()
- elif arg == "blendList":
- conv = None
- else:
- assert False
- table[i] = op, name, arg, default, conv
-
-
-addConverters(privateDictOperators)
-addConverters(topDictOperators)
-
-
-class TopDictDecompiler(psCharStrings.DictDecompiler):
- operators = buildOperatorDict(topDictOperators)
-
-
-class PrivateDictDecompiler(psCharStrings.DictDecompiler):
- operators = buildOperatorDict(privateDictOperators)
-
-
-class DictCompiler(object):
- maxBlendStack = 0
-
- def __init__(self, dictObj, strings, parent, isCFF2=None):
- if strings:
- assert isinstance(strings, IndexedStrings)
- if isCFF2 is None and hasattr(parent, "isCFF2"):
- isCFF2 = parent.isCFF2
- assert isCFF2 is not None
- self.isCFF2 = isCFF2
- self.dictObj = dictObj
- self.strings = strings
- self.parent = parent
- rawDict = {}
- for name in dictObj.order:
- value = getattr(dictObj, name, None)
- if value is None:
- continue
- conv = dictObj.converters[name]
- value = conv.write(dictObj, value)
- if value == dictObj.defaults.get(name):
- continue
- rawDict[name] = value
- self.rawDict = rawDict
-
- def setPos(self, pos, endPos):
- pass
-
- def getDataLength(self):
- return len(self.compile("getDataLength"))
-
- def compile(self, reason):
- log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
- rawDict = self.rawDict
- data = []
- for name in self.dictObj.order:
- value = rawDict.get(name)
- if value is None:
- continue
- op, argType = self.opcodes[name]
- if isinstance(argType, tuple):
- l = len(argType)
- assert len(value) == l, "value doesn't match arg type"
- for i in range(l):
- arg = argType[i]
- v = value[i]
- arghandler = getattr(self, "arg_" + arg)
- data.append(arghandler(v))
- else:
- arghandler = getattr(self, "arg_" + argType)
- data.append(arghandler(value))
- data.append(op)
- data = bytesjoin(data)
- return data
-
- def toFile(self, file):
- data = self.compile("toFile")
- file.write(data)
-
- def arg_number(self, num):
- if isinstance(num, list):
- data = [encodeNumber(val) for val in num]
- data.append(encodeNumber(1))
- data.append(bytechr(blendOp))
- datum = bytesjoin(data)
- else:
- datum = encodeNumber(num)
- return datum
-
- def arg_SID(self, s):
- return psCharStrings.encodeIntCFF(self.strings.getSID(s))
-
- def arg_array(self, value):
- data = []
- for num in value:
- data.append(self.arg_number(num))
- return bytesjoin(data)
-
- def arg_delta(self, value):
- if not value:
- return b""
- val0 = value[0]
- if isinstance(val0, list):
- data = self.arg_delta_blend(value)
- else:
- out = []
- last = 0
- for v in value:
- out.append(v - last)
- last = v
- data = []
- for num in out:
- data.append(encodeNumber(num))
- return bytesjoin(data)
-
- def arg_delta_blend(self, value):
- """A delta list with blend lists has to be *all* blend lists.
-
- The value is a list arranged as follows::
-
- [
- [V0, d0..dn]
- [V1, d0..dn]
- ...
- [Vm, d0..dn]
- ]
-
- ``V`` is the absolute coordinate value from the default font, and ``d0-dn``
- are the delta values from the *n* regions.
-
- We want to return a list::
-
- [
- [v0, v1..vm]
- [d0..dn]
- ...
- [d0..dn]
- numBlends
- blendOp
- ]
-
- where each ``v`` is relative to the previous default font value.
- """
- numMasters = len(value[0])
- numBlends = len(value)
- numStack = (numBlends * numMasters) + 1
- if numStack > self.maxBlendStack:
- # Figure out the max number of values we can blend
- # and divide this list up into chunks of that size.
-
- numBlendValues = int((self.maxBlendStack - 1) / numMasters)
- out = []
- while True:
- numVal = min(len(value), numBlendValues)
- if numVal == 0:
- break
- valList = value[0:numVal]
- out1 = self.arg_delta_blend(valList)
- out.extend(out1)
- value = value[numVal:]
- else:
- firstList = [0] * numBlends
- deltaList = [None] * numBlends
- i = 0
- prevVal = 0
- while i < numBlends:
- # For PrivateDict BlueValues, the default font
- # values are absolute, not relative.
- # Must convert these back to relative coordinates
- # before writing to CFF2.
- defaultValue = value[i][0]
- firstList[i] = defaultValue - prevVal
- prevVal = defaultValue
- deltaList[i] = value[i][1:]
- i += 1
-
- relValueList = firstList
- for blendList in deltaList:
- relValueList.extend(blendList)
- out = [encodeNumber(val) for val in relValueList]
- out.append(encodeNumber(numBlends))
- out.append(bytechr(blendOp))
- return out
-
-
-def encodeNumber(num):
- if isinstance(num, float):
- return psCharStrings.encodeFloat(num)
- else:
- return psCharStrings.encodeIntCFF(num)
-
-
-class TopDictCompiler(DictCompiler):
-
- opcodes = buildOpcodeDict(topDictOperators)
-
- def getChildren(self, strings):
- isCFF2 = self.isCFF2
- children = []
- if self.dictObj.cff2GetGlyphOrder is None:
- if hasattr(self.dictObj, "charset") and self.dictObj.charset:
- if hasattr(self.dictObj, "ROS"): # aka isCID
- charsetCode = None
- else:
- charsetCode = getStdCharSet(self.dictObj.charset)
- if charsetCode is None:
- children.append(
- CharsetCompiler(strings, self.dictObj.charset, self)
- )
- else:
- self.rawDict["charset"] = charsetCode
- if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
- encoding = self.dictObj.Encoding
- if not isinstance(encoding, str):
- children.append(EncodingCompiler(strings, encoding, self))
- else:
- if hasattr(self.dictObj, "VarStore"):
- varStoreData = self.dictObj.VarStore
- varStoreComp = VarStoreCompiler(varStoreData, self)
- children.append(varStoreComp)
- if hasattr(self.dictObj, "FDSelect"):
- # I have not yet supported merging a ttx CFF-CID font, as there are
- # interesting issues about merging the FDArrays. Here I assume that
- # either the font was read from XML, and the FDSelect indices are all
- # in the charstring data, or the FDSelect array is already fully defined.
- fdSelect = self.dictObj.FDSelect
- # probably read in from XML; assume fdIndex in CharString data
- if len(fdSelect) == 0:
- charStrings = self.dictObj.CharStrings
- for name in self.dictObj.charset:
- fdSelect.append(charStrings[name].fdSelectIndex)
- fdSelectComp = FDSelectCompiler(fdSelect, self)
- children.append(fdSelectComp)
- if hasattr(self.dictObj, "CharStrings"):
- items = []
- charStrings = self.dictObj.CharStrings
- for name in self.dictObj.charset:
- items.append(charStrings[name])
- charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2)
- children.append(charStringsComp)
- if hasattr(self.dictObj, "FDArray"):
- # I have not yet supported merging a ttx CFF-CID font, as there are
- # interesting issues about merging the FDArrays. Here I assume that the
- # FDArray info is correct and complete.
- fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
- children.append(fdArrayIndexComp)
- children.extend(fdArrayIndexComp.getChildren(strings))
- if hasattr(self.dictObj, "Private"):
- privComp = self.dictObj.Private.getCompiler(strings, self)
- children.append(privComp)
- children.extend(privComp.getChildren(strings))
- return children
-
-
-class FontDictCompiler(DictCompiler):
- opcodes = buildOpcodeDict(topDictOperators)
-
- def __init__(self, dictObj, strings, parent, isCFF2=None):
- super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
- #
- # We now take some effort to detect if there were any key/value pairs
- # supplied that were ignored in the FontDict context, and issue a warning
- # for those cases.
- #
- ignoredNames = []
- dictObj = self.dictObj
- for name in sorted(set(dictObj.converters) - set(dictObj.order)):
- if name in dictObj.rawDict:
- # The font was directly read from binary. In this
- # case, we want to report *all* "useless" key/value
- # pairs that are in the font, not just the ones that
- # are different from the default.
- ignoredNames.append(name)
- else:
- # The font was probably read from a TTX file. We only
- # warn about keys whose value is not the default. The
- # ones that have the default value will not be written
- # to binary anyway.
- default = dictObj.defaults.get(name)
- if default is not None:
- conv = dictObj.converters[name]
- default = conv.read(dictObj, default)
- if getattr(dictObj, name, None) != default:
- ignoredNames.append(name)
- if ignoredNames:
- log.warning(
- "Some CFF FDArray/FontDict keys were ignored upon compile: "
- + " ".join(sorted(ignoredNames))
- )
-
- def getChildren(self, strings):
- children = []
- if hasattr(self.dictObj, "Private"):
- privComp = self.dictObj.Private.getCompiler(strings, self)
- children.append(privComp)
- children.extend(privComp.getChildren(strings))
- return children
-
-
-class PrivateDictCompiler(DictCompiler):
-
- maxBlendStack = maxStackLimit
- opcodes = buildOpcodeDict(privateDictOperators)
-
- def setPos(self, pos, endPos):
- size = endPos - pos
- self.parent.rawDict["Private"] = size, pos
- self.pos = pos
-
- def getChildren(self, strings):
- children = []
- if hasattr(self.dictObj, "Subrs"):
- children.append(self.dictObj.Subrs.getCompiler(strings, self))
- return children
-
-
-class BaseDict(object):
- def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
- assert (isCFF2 is None) == (file is None)
- self.rawDict = {}
- self.skipNames = []
- self.strings = strings
- if file is None:
- return
- self._isCFF2 = isCFF2
- self.file = file
- if offset is not None:
- log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
- self.offset = offset
-
- def decompile(self, data):
- log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
- dec = self.decompilerClass(self.strings, self)
- dec.decompile(data)
- self.rawDict = dec.getDict()
- self.postDecompile()
-
- def postDecompile(self):
- pass
-
- def getCompiler(self, strings, parent, isCFF2=None):
- return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
-
- def __getattr__(self, name):
- if name[:2] == name[-2:] == "__":
- # to make deepcopy() and pickle.load() work, we need to signal with
- # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
- # aren't implemented. For more details, see:
- # https://github.com/fonttools/fonttools/pull/1488
- raise AttributeError(name)
- value = self.rawDict.get(name, None)
- if value is None:
- value = self.defaults.get(name)
- if value is None:
- raise AttributeError(name)
- conv = self.converters[name]
- value = conv.read(self, value)
- setattr(self, name, value)
- return value
-
- def toXML(self, xmlWriter):
- for name in self.order:
- if name in self.skipNames:
- continue
- value = getattr(self, name, None)
- # XXX For "charset" we never skip calling xmlWrite even if the
- # value is None, so we always write the following XML comment:
- #
- # <!-- charset -->
- #
- # Charset is None when 'CFF ' table is imported from XML into an
- # empty TTFont(). By writing this comment all the time, we obtain
- # the same XML output whether roundtripping XML-to-XML or
- # dumping binary-to-XML
- if value is None and name != "charset":
- continue
- conv = self.converters[name]
- conv.xmlWrite(xmlWriter, name, value)
- ignoredNames = set(self.rawDict) - set(self.order)
- if ignoredNames:
- xmlWriter.comment(
- "some keys were ignored: %s" % " ".join(sorted(ignoredNames))
- )
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content):
- conv = self.converters[name]
- value = conv.xmlRead(name, attrs, content, self)
- setattr(self, name, value)
-
-
-class TopDict(BaseDict):
- """The ``TopDict`` represents the top-level dictionary holding font
- information. CFF2 tables contain a restricted set of top-level entries
- as described in the CFF2 specification,
- but CFF tables may contain a wider range of information. This information
- can be accessed through attributes or through the dictionary returned
- through the ``rawDict`` property:
-
- .. code:: python
-
- font = tt["CFF "].cff[0]
- font.FamilyName
- # 'Linux Libertine O'
- font.rawDict["FamilyName"]
- # 'Linux Libertine O'
-
- More information is available in the CFF file's private dictionary, accessed
- via the ``Private`` property:
-
- .. code:: python
-
- tt["CFF "].cff[0].Private.BlueValues
- # [-15, 0, 515, 515, 666, 666]
-
- """
-
- defaults = buildDefaults(topDictOperators)
- converters = buildConverters(topDictOperators)
- compilerClass = TopDictCompiler
- order = buildOrder(topDictOperators)
- decompilerClass = TopDictDecompiler
-
- def __init__(
- self,
- strings=None,
- file=None,
- offset=None,
- GlobalSubrs=None,
- cff2GetGlyphOrder=None,
- isCFF2=None,
- ):
- super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.cff2GetGlyphOrder = cff2GetGlyphOrder
- self.GlobalSubrs = GlobalSubrs
- if isCFF2:
- self.defaults = buildDefaults(topDictOperators2)
- self.charset = cff2GetGlyphOrder()
- self.order = buildOrder(topDictOperators2)
- else:
- self.defaults = buildDefaults(topDictOperators)
- self.order = buildOrder(topDictOperators)
-
- def getGlyphOrder(self):
- """Returns a list of glyph names in the CFF font."""
- return self.charset
-
- def postDecompile(self):
- offset = self.rawDict.get("CharStrings")
- if offset is None:
- return
- # get the number of glyphs beforehand.
- self.file.seek(offset)
- if self._isCFF2:
- self.numGlyphs = readCard32(self.file)
- else:
- self.numGlyphs = readCard16(self.file)
-
- def toXML(self, xmlWriter):
- if hasattr(self, "CharStrings"):
- self.decompileAllCharStrings()
- if hasattr(self, "ROS"):
- self.skipNames = ["Encoding"]
- if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
- # these values have default values, but I only want them to show up
- # in CID fonts.
- self.skipNames = [
- "CIDFontVersion",
- "CIDFontRevision",
- "CIDFontType",
- "CIDCount",
- ]
- BaseDict.toXML(self, xmlWriter)
-
- def decompileAllCharStrings(self):
- # Make sure that all the Private Dicts have been instantiated.
- for i, charString in enumerate(self.CharStrings.values()):
- try:
- charString.decompile()
- except:
- log.error("Error in charstring %s", i)
- raise
-
- def recalcFontBBox(self):
- fontBBox = None
- for charString in self.CharStrings.values():
- bounds = charString.calcBounds(self.CharStrings)
- if bounds is not None:
- if fontBBox is not None:
- fontBBox = unionRect(fontBBox, bounds)
- else:
- fontBBox = bounds
-
- if fontBBox is None:
- self.FontBBox = self.defaults["FontBBox"][:]
- else:
- self.FontBBox = list(intRect(fontBBox))
-
-
-class FontDict(BaseDict):
- #
- # Since fonttools used to pass a lot of fields that are not relevant in the FDArray
- # FontDict, there are 'ttx' files in the wild that contain all these. These got in
- # the ttx files because fonttools writes explicit values for all the TopDict default
- # values. These are not actually illegal in the context of an FDArray FontDict - you
- # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
- # useless since current major company CFF interpreters ignore anything but the set
- # listed in this file. So, we just silently skip them. An exception is Weight: this
- # is not used by any interpreter, but some foundries have asked that this be
- # supported in FDArray FontDicts just to preserve information about the design when
- # the font is being inspected.
- #
- # On top of that, there are fonts out there that contain such useless FontDict values.
- #
- # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
- # from binary or when reading from XML, but by overriding `order` with a limited
- # list of names, we ensure that only the useful names ever get exported to XML and
- # ever get compiled into the binary font.
- #
- # We override compilerClass so we can warn about "useless" key/value pairs, either
- # from the original binary font or from TTX input.
- #
- # See:
- # - https://github.com/fonttools/fonttools/issues/740
- # - https://github.com/fonttools/fonttools/issues/601
- # - https://github.com/adobe-type-tools/afdko/issues/137
- #
- defaults = {}
- converters = buildConverters(topDictOperators)
- compilerClass = FontDictCompiler
- orderCFF = ["FontName", "FontMatrix", "Weight", "Private"]
- orderCFF2 = ["Private"]
- decompilerClass = TopDictDecompiler
-
- def __init__(
- self,
- strings=None,
- file=None,
- offset=None,
- GlobalSubrs=None,
- isCFF2=None,
- vstore=None,
- ):
- super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.vstore = vstore
- self.setCFF2(isCFF2)
-
- def setCFF2(self, isCFF2):
- # isCFF2 may be None.
- if isCFF2:
- self.order = self.orderCFF2
- self._isCFF2 = True
- else:
- self.order = self.orderCFF
- self._isCFF2 = False
-
-
-class PrivateDict(BaseDict):
- defaults = buildDefaults(privateDictOperators)
- converters = buildConverters(privateDictOperators)
- order = buildOrder(privateDictOperators)
- decompilerClass = PrivateDictDecompiler
- compilerClass = PrivateDictCompiler
-
- def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None):
- super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
- self.vstore = vstore
- if isCFF2:
- self.defaults = buildDefaults(privateDictOperators2)
- self.order = buildOrder(privateDictOperators2)
- # Provide dummy values. This avoids needing to provide
- # an isCFF2 state in a lot of places.
- self.nominalWidthX = self.defaultWidthX = None
- else:
- self.defaults = buildDefaults(privateDictOperators)
- self.order = buildOrder(privateDictOperators)
-
- @property
- def in_cff2(self):
- return self._isCFF2
-
- def getNumRegions(self, vi=None): # called from misc/psCharStrings.py
- # if getNumRegions is being called, we can assume that VarStore exists.
- if vi is None:
- if hasattr(self, "vsindex"):
- vi = self.vsindex
- else:
- vi = 0
- numRegions = self.vstore.getNumRegions(vi)
- return numRegions
-
-
-class IndexedStrings(object):
-
- """SID -> string mapping."""
-
- def __init__(self, file=None):
- if file is None:
- strings = []
- else:
- strings = [tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)]
- self.strings = strings
-
- def getCompiler(self):
- return IndexedStringsCompiler(self, None, self, isCFF2=False)
-
- def __len__(self):
- return len(self.strings)
-
- def __getitem__(self, SID):
- if SID < cffStandardStringCount:
- return cffStandardStrings[SID]
- else:
- return self.strings[SID - cffStandardStringCount]
-
- def getSID(self, s):
- if not hasattr(self, "stringMapping"):
- self.buildStringMapping()
- s = tostr(s, encoding="latin1")
- if s in cffStandardStringMapping:
- SID = cffStandardStringMapping[s]
- elif s in self.stringMapping:
- SID = self.stringMapping[s]
- else:
- SID = len(self.strings) + cffStandardStringCount
- self.strings.append(s)
- self.stringMapping[s] = SID
- return SID
-
- def getStrings(self):
- return self.strings
-
- def buildStringMapping(self):
- self.stringMapping = {}
- for index in range(len(self.strings)):
- self.stringMapping[self.strings[index]] = index + cffStandardStringCount
-
-
-# The 391 Standard Strings as used in the CFF format.
- # from Adobe Technical Note #5176, version 1.0, 18 March 1998
-
-cffStandardStrings = [
- ".notdef",
- "space",
- "exclam",
- "quotedbl",
- "numbersign",
- "dollar",
- "percent",
- "ampersand",
- "quoteright",
- "parenleft",
- "parenright",
- "asterisk",
- "plus",
- "comma",
- "hyphen",
- "period",
- "slash",
- "zero",
- "one",
- "two",
- "three",
- "four",
- "five",
- "six",
- "seven",
- "eight",
- "nine",
- "colon",
- "semicolon",
- "less",
- "equal",
- "greater",
- "question",
- "at",
- "A",
- "B",
- "C",
- "D",
- "E",
- "F",
- "G",
- "H",
- "I",
- "J",
- "K",
- "L",
- "M",
- "N",
- "O",
- "P",
- "Q",
- "R",
- "S",
- "T",
- "U",
- "V",
- "W",
- "X",
- "Y",
- "Z",
- "bracketleft",
- "backslash",
- "bracketright",
- "asciicircum",
- "underscore",
- "quoteleft",
- "a",
- "b",
- "c",
- "d",
- "e",
- "f",
- "g",
- "h",
- "i",
- "j",
- "k",
- "l",
- "m",
- "n",
- "o",
- "p",
- "q",
- "r",
- "s",
- "t",
- "u",
- "v",
- "w",
- "x",
- "y",
- "z",
- "braceleft",
- "bar",
- "braceright",
- "asciitilde",
- "exclamdown",
- "cent",
- "sterling",
- "fraction",
- "yen",
- "florin",
- "section",
- "currency",
- "quotesingle",
- "quotedblleft",
- "guillemotleft",
- "guilsinglleft",
- "guilsinglright",
- "fi",
- "fl",
- "endash",
- "dagger",
- "daggerdbl",
- "periodcentered",
- "paragraph",
- "bullet",
- "quotesinglbase",
- "quotedblbase",
- "quotedblright",
- "guillemotright",
- "ellipsis",
- "perthousand",
- "questiondown",
- "grave",
- "acute",
- "circumflex",
- "tilde",
- "macron",
- "breve",
- "dotaccent",
- "dieresis",
- "ring",
- "cedilla",
- "hungarumlaut",
- "ogonek",
- "caron",
- "emdash",
- "AE",
- "ordfeminine",
- "Lslash",
- "Oslash",
- "OE",
- "ordmasculine",
- "ae",
- "dotlessi",
- "lslash",
- "oslash",
- "oe",
- "germandbls",
- "onesuperior",
- "logicalnot",
- "mu",
- "trademark",
- "Eth",
- "onehalf",
- "plusminus",
- "Thorn",
- "onequarter",
- "divide",
- "brokenbar",
- "degree",
- "thorn",
- "threequarters",
- "twosuperior",
- "registered",
- "minus",
- "eth",
- "multiply",
- "threesuperior",
- "copyright",
- "Aacute",
- "Acircumflex",
- "Adieresis",
- "Agrave",
- "Aring",
- "Atilde",
- "Ccedilla",
- "Eacute",
- "Ecircumflex",
- "Edieresis",
- "Egrave",
- "Iacute",
- "Icircumflex",
- "Idieresis",
- "Igrave",
- "Ntilde",
- "Oacute",
- "Ocircumflex",
- "Odieresis",
- "Ograve",
- "Otilde",
- "Scaron",
- "Uacute",
- "Ucircumflex",
- "Udieresis",
- "Ugrave",
- "Yacute",
- "Ydieresis",
- "Zcaron",
- "aacute",
- "acircumflex",
- "adieresis",
- "agrave",
- "aring",
- "atilde",
- "ccedilla",
- "eacute",
- "ecircumflex",
- "edieresis",
- "egrave",
- "iacute",
- "icircumflex",
- "idieresis",
- "igrave",
- "ntilde",
- "oacute",
- "ocircumflex",
- "odieresis",
- "ograve",
- "otilde",
- "scaron",
- "uacute",
- "ucircumflex",
- "udieresis",
- "ugrave",
- "yacute",
- "ydieresis",
- "zcaron",
- "exclamsmall",
- "Hungarumlautsmall",
- "dollaroldstyle",
- "dollarsuperior",
- "ampersandsmall",
- "Acutesmall",
- "parenleftsuperior",
- "parenrightsuperior",
- "twodotenleader",
- "onedotenleader",
- "zerooldstyle",
- "oneoldstyle",
- "twooldstyle",
- "threeoldstyle",
- "fouroldstyle",
- "fiveoldstyle",
- "sixoldstyle",
- "sevenoldstyle",
- "eightoldstyle",
- "nineoldstyle",
- "commasuperior",
- "threequartersemdash",
- "periodsuperior",
- "questionsmall",
- "asuperior",
- "bsuperior",
- "centsuperior",
- "dsuperior",
- "esuperior",
- "isuperior",
- "lsuperior",
- "msuperior",
- "nsuperior",
- "osuperior",
- "rsuperior",
- "ssuperior",
- "tsuperior",
- "ff",
- "ffi",
- "ffl",
- "parenleftinferior",
- "parenrightinferior",
- "Circumflexsmall",
- "hyphensuperior",
- "Gravesmall",
- "Asmall",
- "Bsmall",
- "Csmall",
- "Dsmall",
- "Esmall",
- "Fsmall",
- "Gsmall",
- "Hsmall",
- "Ismall",
- "Jsmall",
- "Ksmall",
- "Lsmall",
- "Msmall",
- "Nsmall",
- "Osmall",
- "Psmall",
- "Qsmall",
- "Rsmall",
- "Ssmall",
- "Tsmall",
- "Usmall",
- "Vsmall",
- "Wsmall",
- "Xsmall",
- "Ysmall",
- "Zsmall",
- "colonmonetary",
- "onefitted",
- "rupiah",
- "Tildesmall",
- "exclamdownsmall",
- "centoldstyle",
- "Lslashsmall",
- "Scaronsmall",
- "Zcaronsmall",
- "Dieresissmall",
- "Brevesmall",
- "Caronsmall",
- "Dotaccentsmall",
- "Macronsmall",
- "figuredash",
- "hypheninferior",
- "Ogoneksmall",
- "Ringsmall",
- "Cedillasmall",
- "questiondownsmall",
- "oneeighth",
- "threeeighths",
- "fiveeighths",
- "seveneighths",
- "onethird",
- "twothirds",
- "zerosuperior",
- "foursuperior",
- "fivesuperior",
- "sixsuperior",
- "sevensuperior",
- "eightsuperior",
- "ninesuperior",
- "zeroinferior",
- "oneinferior",
- "twoinferior",
- "threeinferior",
- "fourinferior",
- "fiveinferior",
- "sixinferior",
- "seveninferior",
- "eightinferior",
- "nineinferior",
- "centinferior",
- "dollarinferior",
- "periodinferior",
- "commainferior",
- "Agravesmall",
- "Aacutesmall",
- "Acircumflexsmall",
- "Atildesmall",
- "Adieresissmall",
- "Aringsmall",
- "AEsmall",
- "Ccedillasmall",
- "Egravesmall",
- "Eacutesmall",
- "Ecircumflexsmall",
- "Edieresissmall",
- "Igravesmall",
- "Iacutesmall",
- "Icircumflexsmall",
- "Idieresissmall",
- "Ethsmall",
- "Ntildesmall",
- "Ogravesmall",
- "Oacutesmall",
- "Ocircumflexsmall",
- "Otildesmall",
- "Odieresissmall",
- "OEsmall",
- "Oslashsmall",
- "Ugravesmall",
- "Uacutesmall",
- "Ucircumflexsmall",
- "Udieresissmall",
- "Yacutesmall",
- "Thornsmall",
- "Ydieresissmall",
- "001.000",
- "001.001",
- "001.002",
- "001.003",
- "Black",
- "Bold",
- "Book",
- "Light",
- "Medium",
- "Regular",
- "Roman",
- "Semibold",
-]
-
-cffStandardStringCount = 391
-assert len(cffStandardStrings) == cffStandardStringCount
-# build reverse mapping
-cffStandardStringMapping = {}
-for _i in range(cffStandardStringCount):
- cffStandardStringMapping[cffStandardStrings[_i]] = _i
-
-cffISOAdobeStrings = [
- ".notdef",
- "space",
- "exclam",
- "quotedbl",
- "numbersign",
- "dollar",
- "percent",
- "ampersand",
- "quoteright",
- "parenleft",
- "parenright",
- "asterisk",
- "plus",
- "comma",
- "hyphen",
- "period",
- "slash",
- "zero",
- "one",
- "two",
- "three",
- "four",
- "five",
- "six",
- "seven",
- "eight",
- "nine",
- "colon",
- "semicolon",
- "less",
- "equal",
- "greater",
- "question",
- "at",
- "A",
- "B",
- "C",
- "D",
- "E",
- "F",
- "G",
- "H",
- "I",
- "J",
- "K",
- "L",
- "M",
- "N",
- "O",
- "P",
- "Q",
- "R",
- "S",
- "T",
- "U",
- "V",
- "W",
- "X",
- "Y",
- "Z",
- "bracketleft",
- "backslash",
- "bracketright",
- "asciicircum",
- "underscore",
- "quoteleft",
- "a",
- "b",
- "c",
- "d",
- "e",
- "f",
- "g",
- "h",
- "i",
- "j",
- "k",
- "l",
- "m",
- "n",
- "o",
- "p",
- "q",
- "r",
- "s",
- "t",
- "u",
- "v",
- "w",
- "x",
- "y",
- "z",
- "braceleft",
- "bar",
- "braceright",
- "asciitilde",
- "exclamdown",
- "cent",
- "sterling",
- "fraction",
- "yen",
- "florin",
- "section",
- "currency",
- "quotesingle",
- "quotedblleft",
- "guillemotleft",
- "guilsinglleft",
- "guilsinglright",
- "fi",
- "fl",
- "endash",
- "dagger",
- "daggerdbl",
- "periodcentered",
- "paragraph",
- "bullet",
- "quotesinglbase",
- "quotedblbase",
- "quotedblright",
- "guillemotright",
- "ellipsis",
- "perthousand",
- "questiondown",
- "grave",
- "acute",
- "circumflex",
- "tilde",
- "macron",
- "breve",
- "dotaccent",
- "dieresis",
- "ring",
- "cedilla",
- "hungarumlaut",
- "ogonek",
- "caron",
- "emdash",
- "AE",
- "ordfeminine",
- "Lslash",
- "Oslash",
- "OE",
- "ordmasculine",
- "ae",
- "dotlessi",
- "lslash",
- "oslash",
- "oe",
- "germandbls",
- "onesuperior",
- "logicalnot",
- "mu",
- "trademark",
- "Eth",
- "onehalf",
- "plusminus",
- "Thorn",
- "onequarter",
- "divide",
- "brokenbar",
- "degree",
- "thorn",
- "threequarters",
- "twosuperior",
- "registered",
- "minus",
- "eth",
- "multiply",
- "threesuperior",
- "copyright",
- "Aacute",
- "Acircumflex",
- "Adieresis",
- "Agrave",
- "Aring",
- "Atilde",
- "Ccedilla",
- "Eacute",
- "Ecircumflex",
- "Edieresis",
- "Egrave",
- "Iacute",
- "Icircumflex",
- "Idieresis",
- "Igrave",
- "Ntilde",
- "Oacute",
- "Ocircumflex",
- "Odieresis",
- "Ograve",
- "Otilde",
- "Scaron",
- "Uacute",
- "Ucircumflex",
- "Udieresis",
- "Ugrave",
- "Yacute",
- "Ydieresis",
- "Zcaron",
- "aacute",
- "acircumflex",
- "adieresis",
- "agrave",
- "aring",
- "atilde",
- "ccedilla",
- "eacute",
- "ecircumflex",
- "edieresis",
- "egrave",
- "iacute",
- "icircumflex",
- "idieresis",
- "igrave",
- "ntilde",
- "oacute",
- "ocircumflex",
- "odieresis",
- "ograve",
- "otilde",
- "scaron",
- "uacute",
- "ucircumflex",
- "udieresis",
- "ugrave",
- "yacute",
- "ydieresis",
- "zcaron",
-]
-
-cffISOAdobeStringCount = 229
-assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
-
-cffIExpertStrings = [
- ".notdef",
- "space",
- "exclamsmall",
- "Hungarumlautsmall",
- "dollaroldstyle",
- "dollarsuperior",
- "ampersandsmall",
- "Acutesmall",
- "parenleftsuperior",
- "parenrightsuperior",
- "twodotenleader",
- "onedotenleader",
- "comma",
- "hyphen",
- "period",
- "fraction",
- "zerooldstyle",
- "oneoldstyle",
- "twooldstyle",
- "threeoldstyle",
- "fouroldstyle",
- "fiveoldstyle",
- "sixoldstyle",
- "sevenoldstyle",
- "eightoldstyle",
- "nineoldstyle",
- "colon",
- "semicolon",
- "commasuperior",
- "threequartersemdash",
- "periodsuperior",
- "questionsmall",
- "asuperior",
- "bsuperior",
- "centsuperior",
- "dsuperior",
- "esuperior",
- "isuperior",
- "lsuperior",
- "msuperior",
- "nsuperior",
- "osuperior",
- "rsuperior",
- "ssuperior",
- "tsuperior",
- "ff",
- "fi",
- "fl",
- "ffi",
- "ffl",
- "parenleftinferior",
- "parenrightinferior",
- "Circumflexsmall",
- "hyphensuperior",
- "Gravesmall",
- "Asmall",
- "Bsmall",
- "Csmall",
- "Dsmall",
- "Esmall",
- "Fsmall",
- "Gsmall",
- "Hsmall",
- "Ismall",
- "Jsmall",
- "Ksmall",
- "Lsmall",
- "Msmall",
- "Nsmall",
- "Osmall",
- "Psmall",
- "Qsmall",
- "Rsmall",
- "Ssmall",
- "Tsmall",
- "Usmall",
- "Vsmall",
- "Wsmall",
- "Xsmall",
- "Ysmall",
- "Zsmall",
- "colonmonetary",
- "onefitted",
- "rupiah",
- "Tildesmall",
- "exclamdownsmall",
- "centoldstyle",
- "Lslashsmall",
- "Scaronsmall",
- "Zcaronsmall",
- "Dieresissmall",
- "Brevesmall",
- "Caronsmall",
- "Dotaccentsmall",
- "Macronsmall",
- "figuredash",
- "hypheninferior",
- "Ogoneksmall",
- "Ringsmall",
- "Cedillasmall",
- "onequarter",
- "onehalf",
- "threequarters",
- "questiondownsmall",
- "oneeighth",
- "threeeighths",
- "fiveeighths",
- "seveneighths",
- "onethird",
- "twothirds",
- "zerosuperior",
- "onesuperior",
- "twosuperior",
- "threesuperior",
- "foursuperior",
- "fivesuperior",
- "sixsuperior",
- "sevensuperior",
- "eightsuperior",
- "ninesuperior",
- "zeroinferior",
- "oneinferior",
- "twoinferior",
- "threeinferior",
- "fourinferior",
- "fiveinferior",
- "sixinferior",
- "seveninferior",
- "eightinferior",
- "nineinferior",
- "centinferior",
- "dollarinferior",
- "periodinferior",
- "commainferior",
- "Agravesmall",
- "Aacutesmall",
- "Acircumflexsmall",
- "Atildesmall",
- "Adieresissmall",
- "Aringsmall",
- "AEsmall",
- "Ccedillasmall",
- "Egravesmall",
- "Eacutesmall",
- "Ecircumflexsmall",
- "Edieresissmall",
- "Igravesmall",
- "Iacutesmall",
- "Icircumflexsmall",
- "Idieresissmall",
- "Ethsmall",
- "Ntildesmall",
- "Ogravesmall",
- "Oacutesmall",
- "Ocircumflexsmall",
- "Otildesmall",
- "Odieresissmall",
- "OEsmall",
- "Oslashsmall",
- "Ugravesmall",
- "Uacutesmall",
- "Ucircumflexsmall",
- "Udieresissmall",
- "Yacutesmall",
- "Thornsmall",
- "Ydieresissmall",
-]
-
-cffExpertStringCount = 166
-assert len(cffIExpertStrings) == cffExpertStringCount
-
-cffExpertSubsetStrings = [
- ".notdef",
- "space",
- "dollaroldstyle",
- "dollarsuperior",
- "parenleftsuperior",
- "parenrightsuperior",
- "twodotenleader",
- "onedotenleader",
- "comma",
- "hyphen",
- "period",
- "fraction",
- "zerooldstyle",
- "oneoldstyle",
- "twooldstyle",
- "threeoldstyle",
- "fouroldstyle",
- "fiveoldstyle",
- "sixoldstyle",
- "sevenoldstyle",
- "eightoldstyle",
- "nineoldstyle",
- "colon",
- "semicolon",
- "commasuperior",
- "threequartersemdash",
- "periodsuperior",
- "asuperior",
- "bsuperior",
- "centsuperior",
- "dsuperior",
- "esuperior",
- "isuperior",
- "lsuperior",
- "msuperior",
- "nsuperior",
- "osuperior",
- "rsuperior",
- "ssuperior",
- "tsuperior",
- "ff",
- "fi",
- "fl",
- "ffi",
- "ffl",
- "parenleftinferior",
- "parenrightinferior",
- "hyphensuperior",
- "colonmonetary",
- "onefitted",
- "rupiah",
- "centoldstyle",
- "figuredash",
- "hypheninferior",
- "onequarter",
- "onehalf",
- "threequarters",
- "oneeighth",
- "threeeighths",
- "fiveeighths",
- "seveneighths",
- "onethird",
- "twothirds",
- "zerosuperior",
- "onesuperior",
- "twosuperior",
- "threesuperior",
- "foursuperior",
- "fivesuperior",
- "sixsuperior",
- "sevensuperior",
- "eightsuperior",
- "ninesuperior",
- "zeroinferior",
- "oneinferior",
- "twoinferior",
- "threeinferior",
- "fourinferior",
- "fiveinferior",
- "sixinferior",
- "seveninferior",
- "eightinferior",
- "nineinferior",
- "centinferior",
- "dollarinferior",
- "periodinferior",
- "commainferior",
-]
-
-cffExpertSubsetStringCount = 87
-assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/subset/__main__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/subset/__main__.py
deleted file mode 100644
index decf9ee6e50a612c65a87ebeaa8be115f1d25242..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/subset/__main__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import sys
-from fontTools.subset import main
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__2.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__2.py
deleted file mode 100644
index 43a17f6f1ffa82cd803a44ab61832c99259c9ea9..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I__2.py
+++ /dev/null
@@ -1,15 +0,0 @@
-""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
-tool to store its hinting source data.
-
-TSI2 is the index table containing the lengths and offsets for the glyph
-programs that are contained in the TSI3 table. It uses the same format as
-the TSI0 table.
-"""
-from fontTools import ttLib
-
-superclass = ttLib.getTableClass("TSI0")
-
-
-class table_T_S_I__2(superclass):
-
- dependencies = ["TSI3"]
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/implementations/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/implementations/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py
deleted file mode 100644
index 187641d03bc5770b817d6250409066322db71539..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/utils/logging.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# coding=utf-8
-# Copyright 2020 Optuna, Hugging Face
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Logging utilities."""
-
-import logging
-import os
-from logging import (
- CRITICAL, # NOQA
- DEBUG, # NOQA
- ERROR, # NOQA
- FATAL, # NOQA
- INFO, # NOQA
- NOTSET, # NOQA
- WARN, # NOQA
- WARNING, # NOQA
-)
-from typing import Optional
-
-
-log_levels = {
- "debug": logging.DEBUG,
- "info": logging.INFO,
- "warning": logging.WARNING,
- "error": logging.ERROR,
- "critical": logging.CRITICAL,
-}
-
-_default_log_level = logging.WARNING
-
-
-def _get_library_name() -> str:
- return __name__.split(".")[0]
-
-
-def _get_library_root_logger() -> logging.Logger:
- return logging.getLogger(_get_library_name())
-
-
-def _get_default_logging_level():
- """
- If the HUGGINGFACE_HUB_VERBOSITY env var is set to one of the valid choices,
- return that as the new default level. If it is not, fall back to
- `_default_log_level`.
- """
- env_level_str = os.getenv("HUGGINGFACE_HUB_VERBOSITY", None)
- if env_level_str:
- if env_level_str in log_levels:
- return log_levels[env_level_str]
- else:
- logging.getLogger().warning(
- f"Unknown option HUGGINGFACE_HUB_VERBOSITY={env_level_str}, "
- f"has to be one of: { ', '.join(log_levels.keys()) }"
- )
- return _default_log_level
-
-
-def _configure_library_root_logger() -> None:
- library_root_logger = _get_library_root_logger()
- library_root_logger.addHandler(logging.StreamHandler())
- library_root_logger.setLevel(_get_default_logging_level())
-
-
-def _reset_library_root_logger() -> None:
- library_root_logger = _get_library_root_logger()
- library_root_logger.setLevel(logging.NOTSET)
-
-
-def get_logger(name: Optional[str] = None) -> logging.Logger:
- """
- Returns a logger with the specified name. This function is not supposed
- to be directly accessed by library users.
-
- Args:
- name (`str`, *optional*):
- The name of the logger to get, usually the filename
-
- Example:
-
- ```python
- >>> from huggingface_hub import get_logger
-
- >>> logger = get_logger(__file__)
- >>> logger.set_verbosity_info()
- ```
- """
-
- if name is None:
- name = _get_library_name()
-
- return logging.getLogger(name)
-
-
-def get_verbosity() -> int:
- """Return the current level for the HuggingFace Hub's root logger.
-
- Returns:
- Logging level, e.g., `huggingface_hub.logging.DEBUG` and
- `huggingface_hub.logging.INFO`.
-
-
-
- HuggingFace Hub has the following logging levels:
-
- - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL`
- - `huggingface_hub.logging.ERROR`
- - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN`
- - `huggingface_hub.logging.INFO`
- - `huggingface_hub.logging.DEBUG`
-
-
- """
- return _get_library_root_logger().getEffectiveLevel()
-
-
-def set_verbosity(verbosity: int) -> None:
- """
- Sets the level for the HuggingFace Hub's root logger.
-
- Args:
- verbosity (`int`):
- Logging level, e.g., `huggingface_hub.logging.DEBUG` and
- `huggingface_hub.logging.INFO`.
- """
- _get_library_root_logger().setLevel(verbosity)
-
-
-def set_verbosity_info():
- """
- Sets the verbosity to `logging.INFO`.
- """
- return set_verbosity(INFO)
-
-
-def set_verbosity_warning():
- """
- Sets the verbosity to `logging.WARNING`.
- """
- return set_verbosity(WARNING)
-
-
-def set_verbosity_debug():
- """
- Sets the verbosity to `logging.DEBUG`.
- """
- return set_verbosity(DEBUG)
-
-
-def set_verbosity_error():
- """
- Sets the verbosity to `logging.ERROR`.
- """
- return set_verbosity(ERROR)
-
-
-def disable_propagation() -> None:
- """
- Disable propagation of the library log outputs. Note that log propagation is
- disabled by default.
- """
- _get_library_root_logger().propagate = False
-
-
-def enable_propagation() -> None:
- """
- Enable propagation of the library log outputs. Please disable the
- HuggingFace Hub's default handler to prevent double logging if the root
- logger has been configured.
- """
- _get_library_root_logger().propagate = True
-
-
-_configure_library_root_logger()
diff --git a/spaces/ddosxd/sydney-inpaint/obfuscate.py b/spaces/ddosxd/sydney-inpaint/obfuscate.py
deleted file mode 100644
index 0f2822f80fa88d8362a88f2a4f06d2fff60382d2..0000000000000000000000000000000000000000
--- a/spaces/ddosxd/sydney-inpaint/obfuscate.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import subprocess, random
-
-def obfuscate_js(code):
-
- x = random.randint(1,100000)
-
- with open(f'input{x}.js', 'w') as file:
- file.write(code)
-
- subprocess.run(['node', 'obfuscace', f'input{x}.js', f'output{x}.js'])
-
- with open(f'output{x}.js', 'r') as file:
- obfuscated_code = file.read()
-
- subprocess.run(['rm', f'input{x}.js'])
- subprocess.run(['rm', f'output{x}.js'])
-
- return f"eval({obfuscated_code})"
\ No newline at end of file
diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/unclip/__init__.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/unclip/__init__.py
deleted file mode 100644
index 075e66bb680aca294b36aa7ad0abb8d0f651cd92..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/unclip/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from ...utils import (
- OptionalDependencyNotAvailable,
- is_torch_available,
- is_transformers_available,
- is_transformers_version,
-)
-
-
-try:
- if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
-else:
- from .pipeline_unclip import UnCLIPPipeline
- from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
- from .text_proj import UnCLIPTextProjModel
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/actions/write_code.py b/spaces/deepwisdom/MetaGPT/metagpt/actions/write_code.py
deleted file mode 100644
index fd54ce6992ce535cd935402c58adf1a52936cb8e..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/actions/write_code.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 17:45
-@Author : alexanderwu
-@File : write_code.py
-"""
-from tenacity import retry, stop_after_attempt, wait_fixed
-
-from metagpt.actions.action import Action
-from metagpt.logs import logger
-from metagpt.schema import Message
-from metagpt.utils.common import CodeParser
-
-PROMPT_TEMPLATE = """
-NOTICE
-Role: You are a professional engineer; the main goal is to write PEP8 compliant, elegant, modular, easy to read and maintain Python 3.9 code (but you can also use other programming language)
-ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenced "Format example".
-
-## Code: {filename} Write code with triple quoto, based on the following list and context.
-1. Do your best to implement THIS ONLY ONE FILE. ONLY USE EXISTING API. IF NO API, IMPLEMENT IT.
-2. Requirement: Based on the context, implement one following code file, note to return only in code form, your code will be part of the entire project, so please implement complete, reliable, reusable code snippets
-3. Attention1: If there is any setting, ALWAYS SET A DEFAULT VALUE, ALWAYS USE STRONG TYPE AND EXPLICIT VARIABLE.
-4. Attention2: YOU MUST FOLLOW "Data structures and interface definitions". DONT CHANGE ANY DESIGN.
-5. Think before writing: What should be implemented and provided in this document?
-6. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.
-7. Do not use public member functions that do not exist in your design.
-
------
-# Context
-{context}
------
-## Format example
------
-## Code: {filename}
-```python
-## {filename}
-...
-```
------
-"""
-
-
-class WriteCode(Action):
- def __init__(self, name="WriteCode", context: list[Message] = None, llm=None):
- super().__init__(name, context, llm)
-
- def _is_invalid(self, filename):
- return any(i in filename for i in ["mp3", "wav"])
-
- @retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
- async def write_code(self, prompt):
- code_rsp = await self._aask(prompt)
- code = CodeParser.parse_code(block="", text=code_rsp)
- return code
-
- async def run(self, context, filename):
- prompt = PROMPT_TEMPLATE.format(context=context, filename=filename)
- logger.info(f"Writing {filename}..")
- code = await self.write_code(prompt)
- # code_rsp = await self._aask_v1(prompt, "code_rsp", OUTPUT_MAPPING)
- # self._save(context, filename, code)
- return code
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/utils/singleton.py b/spaces/deepwisdom/MetaGPT/metagpt/utils/singleton.py
deleted file mode 100644
index a9e0862c050777981a753fa3f6449578f07e737c..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/utils/singleton.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 16:15
-@Author : alexanderwu
-@File : singleton.py
-"""
-import abc
-
-
-class Singleton(abc.ABCMeta, type):
- """
- Singleton metaclass for ensuring only one instance of a class.
- """
-
- _instances = {}
-
- def __call__(cls, *args, **kwargs):
- """Call method for the singleton metaclass."""
- if cls not in cls._instances:
- cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
- return cls._instances[cls]
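-
-# Minimal usage sketch (illustrative, not part of the original module):
-#
-#     class Config(metaclass=Singleton):
-#         pass
-#
-#     assert Config() is Config()  # both calls return the same cached instance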
diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/document_store/test_document.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/document_store/test_document.py
deleted file mode 100644
index 5ae357fb100ba0c24df472c1ebfe62b3a76d27e3..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/tests/metagpt/document_store/test_document.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/6/11 19:46
-@Author : alexanderwu
-@File : test_document.py
-"""
-import pytest
-
-from metagpt.const import DATA_PATH
-from metagpt.document_store.document import Document
-
-CASES = [
- ("st/faq.xlsx", "Question", "Answer", 1),
- ("cases/faq.csv", "Question", "Answer", 1),
- # ("cases/faq.json", "Question", "Answer", 1),
- ("docx/faq.docx", None, None, 1),
- ("cases/faq.pdf", None, None, 0), # 这是因为pdf默认没有分割段落
- ("cases/faq.txt", None, None, 0), # 这是因为txt按照256分割段落
-]
-
-
-@pytest.mark.parametrize("relative_path, content_col, meta_col, threshold", CASES)
-def test_document(relative_path, content_col, meta_col, threshold):
- doc = Document(DATA_PATH / relative_path, content_col, meta_col)
- rsp = doc.get_docs_and_metadatas()
- assert len(rsp[0]) > threshold
- assert len(rsp[1]) > threshold
diff --git "a/spaces/derful/Chatgpt-academic/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/spaces/derful/Chatgpt-academic/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py"
deleted file mode 100644
index 38421a6fd000e90e395ff18b2173a9397103f01a..0000000000000000000000000000000000000000
--- "a/spaces/derful/Chatgpt-academic/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py"
+++ /dev/null
@@ -1,25 +0,0 @@
-from predict import predict_no_ui_long_connection
-from toolbox import CatchException, report_execption, write_results_to_file
-import datetime
-
-@CatchException
-def 高阶功能模板函数(api, txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
- history = [] # clear history to avoid input overflow
- chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个函数模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。为了做到简单易读,该函数只有25行代码,不会实时反馈文字流或心跳,请耐心等待程序输出完成。另外您若希望分享新的功能模组,请不吝PR!"))
- yield chatbot, history, '正常' # the GPT request takes a while, so update the status display promptly first
-
- for i in range(5):
- currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
- currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
- i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述改事件的三个最重要的单词。'
-        chatbot.append((i_say, "[Local Message] waiting for gpt response."))
-        yield chatbot, history, '正常' # the gpt request takes a while, so push a status update to the UI first
-
-        # history = []  each query is sent without carrying over the previous conversation history
- gpt_say = predict_no_ui_long_connection(
- api=api, inputs=i_say, top_p=top_p, temperature=temperature, history=[],
-            sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。") # the gpt request takes a while
-
- chatbot[-1] = (i_say, gpt_say)
- history.append(i_say);history.append(gpt_say)
-        yield chatbot, history, '正常' # update the display
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Dockerfile b/spaces/diacanFperku/AutoGPT/Dockerfile
deleted file mode 100644
index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# Use an official Python base image from the Docker Hub
-FROM python:3.10-slim
-
-# Install git
-RUN apt-get -y update
-RUN apt-get -y install git chromium-driver
-
-# Install Xvfb and other dependencies for headless browser testing
-RUN apt-get update \
- && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
-
-# Install Firefox / Chromium
-RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
- && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
- && apt-get update \
- && apt-get install -y chromium firefox-esr
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
- PYTHONUNBUFFERED=1 \
- PYTHONDONTWRITEBYTECODE=1
-
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
- pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt"]
diff --git a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/data_utils.py b/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/data_utils.py
deleted file mode 100644
index 2c98d3dc8b9572bd05859033a74d155425a2a2ab..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/data_utils.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-import torchaudio
-import commons
-from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import cleaned_text_to_sequence, get_bert
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, speaker_id, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_sid_text, hparams):
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
- self.spk_map = hparams.spk2id
- self.hparams = hparams
-
- self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False)
- if self.use_mel_spec_posterior:
- self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 300)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_sid_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_sid_text_new = []
- lengths = []
- skipped = 0
- for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:
- audiopath = f'{_id}'
- if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
- phones = phones.split(" ")
- tone = [int(i) for i in tone.split(" ")]
- word2ph = [int(i) for i in word2ph.split(" ")]
- audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- else:
- skipped += 1
- print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text))
- self.audiopaths_sid_text = audiopaths_sid_text_new
- self.lengths = lengths
-
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
- # separate filename, speaker_id and text
- audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
-
- bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)
-
- spec, wav = self.get_audio(audiopath)
- sid = torch.LongTensor([int(self.spk_map[sid])])
- return (phones, spec, wav, sid, tone, language, bert)
-
- def get_audio(self, filename):
- audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
- '''
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError("{} {} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- '''
- spec_filename = filename.replace(".wav", ".spec.pt")
- if self.use_mel_spec_posterior:
- spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- if self.use_mel_spec_posterior:
- # if os.path.exists(filename.replace(".wav", ".spec.pt")):
- # # spec, n_fft, num_mels, sampling_rate, fmin, fmax
- # spec = spec_to_mel_torch(
- # torch.load(filename.replace(".wav", ".spec.pt")),
- # self.filter_length, self.n_mel_channels, self.sampling_rate,
- # self.hparams.mel_fmin, self.hparams.mel_fmax)
- spec = mel_spectrogram_torch(audio_norm, self.filter_length,
- self.n_mel_channels, self.sampling_rate, self.hop_length,
- self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
- # print(text, word2ph,phone, tone, language_str)
- pold = phone
- w2pho = [i for i in word2ph]
- word2ph = [i for i in word2ph]
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
- pold2 = phone
-
- if self.add_blank:
- p1 = len(phone)
- phone = commons.intersperse(phone, 0)
- p2 = len(phone)
- t1 = len(tone)
- tone = commons.intersperse(tone, 0)
- t2 = len(tone)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- bert_path = wav_path.replace(".wav", ".bert.pt")
- try:
- bert = torch.load(bert_path)
- assert bert.shape[-1] == len(phone)
- except:
- bert = get_bert(text, word2ph, language_str)
- torch.save(bert, bert_path)
- #print(bert.shape[-1], bert_path, text, pold)
- assert bert.shape[-1] == len(phone)
-
- assert bert.shape[-1] == len(phone), (
- bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)
- phone = torch.LongTensor(phone)
- tone = torch.LongTensor(tone)
- language = torch.LongTensor(language)
- return bert, phone, tone, language
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def __getitem__(self, index):
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
- def __len__(self):
- return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
- """ Zero-pads model inputs and targets
- """
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text, audio and speaker identities
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
- sid = torch.LongTensor(len(batch))
-
- text_padded = torch.LongTensor(len(batch), max_text_len)
- tone_padded = torch.LongTensor(len(batch), max_text_len)
- language_padded = torch.LongTensor(len(batch), max_text_len)
- bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
-
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- tone_padded.zero_()
- language_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- bert_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- sid[i] = row[3]
-
- tone = row[4]
- tone_padded[i, :tone.size(0)] = tone
-
- language = row[5]
- language_padded[i, :language.size(0)] = language
-
- bert = row[6]
- bert_padded[i, :, :bert.size(1)] = bert
-
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples which are not included in the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
- """
-
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- for i in range(len(buckets) - 1, 0, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- if (len_bucket == 0):
- continue
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
- # subsample
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
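For context, these three classes are normally wired together in a VITS-style training script; the sketch below assumes an `hps` hyperparameter object with the usual `data`/`train` fields plus a distributed setup (`n_gpus`, `rank`), none of which are defined in the file above:

```python
from torch.utils.data import DataLoader

# Rough wiring of the dataset, bucket sampler and collate function (hps, n_gpus, rank are assumed).
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
    train_dataset,
    hps.train.batch_size,
    [32, 300, 400, 500, 600, 700, 800, 900, 1000],  # spectrogram-length bucket boundaries
    num_replicas=n_gpus,
    rank=rank,
    shuffle=True,
)
collate_fn = TextAudioSpeakerCollate()
train_loader = DataLoader(
    train_dataset,
    num_workers=4,
    shuffle=False,          # per-bucket shuffling is handled by the sampler
    pin_memory=True,
    collate_fn=collate_fn,
    batch_sampler=train_sampler,
)
```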
diff --git a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/symbols.py b/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/symbols.py
deleted file mode 100644
index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/text/symbols.py
+++ /dev/null
@@ -1,51 +0,0 @@
-punctuation = ['!', '?', '…', ",", ".", "'", '-']
-pu_symbols = punctuation + ["SP", "UNK"]
-pad = '_'
-
-# chinese
-zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h',
- 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o',
- 'ong',
- 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn',
- 'w', 'x', 'y', 'z', 'zh',
- "AA", "EE", "OO"]
-num_zh_tones = 6
-
-# japanese
-ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky',
- 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z']
-num_ja_tones = 1
-
-# English
-en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy',
- 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's',
- 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh']
-num_en_tones = 4
-
-# combine all symbols
-normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
-symbols = [pad] + normal_symbols + pu_symbols
-sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]
-
-# combine all tones
-num_tones = num_zh_tones + num_ja_tones + num_en_tones
-
-# language maps
-language_id_map = {
- 'ZH': 0,
- "JA": 1,
- "EN": 2
-}
-num_languages = len(language_id_map.keys())
-
-language_tone_start_map = {
- 'ZH': 0,
- "JA": num_zh_tones,
- "EN": num_zh_tones + num_ja_tones
-}
-
-if __name__ == '__main__':
- a = set(zh_symbols)
- b = set(en_symbols)
- print(sorted(a&b))
-
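As a quick illustration of how these tables are meant to be consumed (this mirrors what `cleaned_text_to_sequence` does elsewhere in the repo; the exact call site is not part of this file):

```python
# Map a phone, tone and language to integer ids using the tables defined above.
phone_id = symbols.index('a')                    # index into the combined symbol list
tone_id = language_tone_start_map['ZH'] + 1      # tone 1 of Chinese, offset into the shared tone range
lang_id = language_id_map['ZH']
print(phone_id, tone_id, lang_id)
```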
diff --git a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/modules.py b/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/modules.py
deleted file mode 100644
index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/modules.py
+++ /dev/null
@@ -1,452 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-from attentions import Encoder
-
-LRELU_SLOPE = 0.1
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
-    Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
- self.hidden_channels =hidden_channels
-        self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
-class TransformerCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- n_layers,
- n_heads,
- p_dropout=0,
- filter_channels=0,
- mean_only=False,
- wn_sharing_parameter=None,
- gin_channels = 0
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
-            return x
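In the model code these coupling layers are stacked with `Flip` into a normalizing-flow block (the `ResidualCouplingBlock` wrapper lives in `models.py`, not in this file); a rough sketch with illustrative hyperparameters:

```python
import torch
from torch import nn

# Illustrative stack of flow steps, mirroring the usual VITS-style ResidualCouplingBlock.
flows = nn.ModuleList()
for _ in range(4):
    flows.append(ResidualCouplingLayer(192, 192, 5, 1, 4, gin_channels=256, mean_only=True))
    flows.append(Flip())

x = torch.randn(2, 192, 50)       # (batch, channels, frames)
x_mask = torch.ones(2, 1, 50)
g = torch.randn(2, 256, 1)        # speaker conditioning vector

for flow in flows:                # forward direction returns (x, logdet)
    x, _ = flow(x, x_mask, g=g)
for flow in reversed(flows):      # reverse direction returns x only
    x = flow(x, x_mask, g=g, reverse=True)
```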
diff --git a/spaces/dineshreddy/WALT/mmdet/core/bbox/assigners/point_assigner.py b/spaces/dineshreddy/WALT/mmdet/core/bbox/assigners/point_assigner.py
deleted file mode 100644
index fb8f5e4edc63f4851e2067034c5e67a3558f31bc..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/core/bbox/assigners/point_assigner.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import torch
-
-from ..builder import BBOX_ASSIGNERS
-from .assign_result import AssignResult
-from .base_assigner import BaseAssigner
-
-
-@BBOX_ASSIGNERS.register_module()
-class PointAssigner(BaseAssigner):
- """Assign a corresponding gt bbox or background to each point.
-
-    Each proposal will be assigned with `0`, or a positive integer
- indicating the ground truth index.
-
- - 0: negative sample, no assigned gt
- - positive integer: positive sample, index (1-based) of assigned gt
- """
-
- def __init__(self, scale=4, pos_num=3):
- self.scale = scale
- self.pos_num = pos_num
-
- def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
- """Assign gt to points.
-
-        This method assigns a gt bbox to every point set; each point set
-        will be assigned the background_label (-1) or a label number.
-        -1 is background, and a semi-positive number is the index (0-based) of
-        the assigned gt.
-        The assignment is done in the following steps, and the order matters.
-
-        1. assign every point to the background_label (-1)
-        2. A point is assigned to some gt bbox if
-            (i) the point is within the k closest points to the gt bbox
-            (ii) the distance between this point and this gt is smaller than
-                its distance to any other gt bbox
-
- Args:
- points (Tensor): points to be assigned, shape(n, 3) while last
- dimension stands for (x, y, stride).
- gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
- gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
- labelled as `ignored`, e.g., crowd boxes in COCO.
- NOTE: currently unused.
- gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
-
- Returns:
- :obj:`AssignResult`: The assign result.
- """
- num_points = points.shape[0]
- num_gts = gt_bboxes.shape[0]
-
- if num_gts == 0 or num_points == 0:
- # If no truth assign everything to the background
- assigned_gt_inds = points.new_full((num_points, ),
- 0,
- dtype=torch.long)
- if gt_labels is None:
- assigned_labels = None
- else:
- assigned_labels = points.new_full((num_points, ),
- -1,
- dtype=torch.long)
- return AssignResult(
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
-
- points_xy = points[:, :2]
- points_stride = points[:, 2]
- points_lvl = torch.log2(
- points_stride).int() # [3...,4...,5...,6...,7...]
- lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
-
- # assign gt box
- gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
- gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
- scale = self.scale
- gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
- torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
- gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
-
- # stores the assigned gt index of each point
- assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
- # stores the assigned gt dist (to this point) of each point
- assigned_gt_dist = points.new_full((num_points, ), float('inf'))
- points_range = torch.arange(points.shape[0])
-
- for idx in range(num_gts):
- gt_lvl = gt_bboxes_lvl[idx]
- # get the index of points in this level
- lvl_idx = gt_lvl == points_lvl
- points_index = points_range[lvl_idx]
- # get the points in this level
- lvl_points = points_xy[lvl_idx, :]
- # get the center point of gt
- gt_point = gt_bboxes_xy[[idx], :]
- # get width and height of gt
- gt_wh = gt_bboxes_wh[[idx], :]
- # compute the distance between gt center and
- # all points in this level
- points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
- # find the nearest k points to gt center in this level
- min_dist, min_dist_index = torch.topk(
- points_gt_dist, self.pos_num, largest=False)
- # the index of nearest k points to gt center in this level
- min_dist_points_index = points_index[min_dist_index]
- # The less_than_recorded_index stores the index
- # of min_dist that is less then the assigned_gt_dist. Where
- # assigned_gt_dist stores the dist from previous assigned gt
- # (if exist) to each point.
- less_than_recorded_index = min_dist < assigned_gt_dist[
- min_dist_points_index]
- # The min_dist_points_index stores the index of points satisfy:
- # (1) it is k nearest to current gt center in this level.
- # (2) it is closer to current gt center than other gt center.
- min_dist_points_index = min_dist_points_index[
- less_than_recorded_index]
- # assign the result
- assigned_gt_inds[min_dist_points_index] = idx + 1
- assigned_gt_dist[min_dist_points_index] = min_dist[
- less_than_recorded_index]
-
- if gt_labels is not None:
- assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
- pos_inds = torch.nonzero(
- assigned_gt_inds > 0, as_tuple=False).squeeze()
- if pos_inds.numel() > 0:
- assigned_labels[pos_inds] = gt_labels[
- assigned_gt_inds[pos_inds] - 1]
- else:
- assigned_labels = None
-
- return AssignResult(
- num_gts, assigned_gt_inds, None, labels=assigned_labels)
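A small self-contained way to see what `assign` returns; the coordinates and labels below are made up purely for illustration:

```python
import torch

assigner = PointAssigner(scale=4, pos_num=1)
points = torch.tensor([[10.0, 10.0, 8.0],          # (x, y, stride): two stride-8 points, one stride-16 point
                       [40.0, 40.0, 8.0],
                       [100.0, 100.0, 16.0]])
gt_bboxes = torch.tensor([[0.0, 0.0, 32.0, 32.0]])  # one ground-truth box
gt_labels = torch.tensor([2])

result = assigner.assign(points, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)   # 1-based gt index per point, 0 means background
print(result.labels)    # class label per point, -1 means unassigned
```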
diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/__init__.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/__init__.py
deleted file mode 100644
index bc5d29ece5bbf2f168f538f151f06d1b263a5153..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from .bbox_head import BBoxHead
-from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
- Shared4Conv1FCBBoxHead)
-from .dii_head import DIIHead
-from .double_bbox_head import DoubleConvFCBBoxHead
-from .sabl_head import SABLHead
-from .scnet_bbox_head import SCNetBBoxHead
-
-__all__ = [
- 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
- 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
- 'SCNetBBoxHead'
-]
diff --git a/spaces/doevent/colorizator/README.md b/spaces/doevent/colorizator/README.md
deleted file mode 100644
index ff99b016d41f0545e6b73290750a39713199de78..0000000000000000000000000000000000000000
--- a/spaces/doevent/colorizator/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: colorizator
-emoji: 🐢
-colorFrom: pink
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.9
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/dotmet/chatgpt_webui/app.py b/spaces/dotmet/chatgpt_webui/app.py
deleted file mode 100644
index dc40900b477bdf4a5c878bd68f9a88de52f674cb..0000000000000000000000000000000000000000
--- a/spaces/dotmet/chatgpt_webui/app.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import gradio as gr
-from revChatGPT.V1 import Chatbot
-
-import argparse
-
-#You can setup login information here, or login in from UI
-
-# If you want to use Email/Password to login, put your account information here
-email = ""
-password = ""
-
-# If you have an access token, put your access token here
-access_token = ""
-
-# If you have a session token, put your session token here
-session_token = ""
-
-
-def get_args():
- parser = argparse.ArgumentParser(description='Command line args.')
- parser.add_argument(
- '--no_markdown',
- action='store_true',
- help='Disable the markdown of the web UI.',)
- return parser.parse_args()
-
-def is_google_colab():
- try:
- import google.colab
- return True
- except:
- return False
-
-chatbot = None
-
-def configure_chatbot(method, info):
-
-    # initialize every credential field locally so branches that skip one do not raise UnboundLocalError
-    email = password = access_token = session_token = ""
- if method=="Email/Password":
- email, password = info.split()
- elif method=="Access token":
- access_token = info
- elif method=="Session token":
- session_token = info
-
- config = {}
- if email and password:
- config.update({"email": email,
- "password": password})
- elif access_token:
- config.update({"access_token": access_token})
- elif session_token:
- config.update({"session_token": session_token})
-
- global chatbot
- try:
- # chatbot = Chatbot(config=config)
- chatbot = None
- except:
- chatbot = None
-
-login_method = ['Email/Password',
- 'Access token',
- 'Session token',
- ]
-
-def ask_bot(prompt):
- message = ""
- if chatbot:
- for data in chatbot.ask(prompt):
- message = data["message"]
- else:
- message = "The chatbot is not set up properly! Try to login again."
- return parse_text(message)
-
-def parse_text(text):
- lines = text.split("\n")
- for i,line in enumerate(lines):
- if "```" in line:
- items = line.split('`')
- if items[-1]:
-                lines[i] = f'<pre><code class="{items[-1]}">'
-            else:
-                lines[i] = f'</code></pre>'
- else:
- if i>0:
- line = line.replace("<", "<")
- line = line.replace(">", ">")
- lines[i] = ' '+line.replace(" ", " ")
- return "".join(lines)
-
-def chat_clone(inputs, history):
- history = history or []
- output = ask_bot(inputs)
- history.append((inputs, output))
- return history, history
-
-if ((email and password) or access_token or session_token):
- css = "style.css"
-else:
- css = None
-
-with gr.Blocks(css=css) as demo:
-
- args = get_args()
-
- if not args.no_markdown:
- gr.Markdown("""
ChatGPT BOT build by revChatGPT & Gradio
""")
- gr.Markdown("#### Author: [dotmet](https://github.com/dotmet) Github link:[ChatGPTWEB](https://github.com/dotmet/chatgpt_webui)")
- gr.Markdown("I have used my own OpenAI account for this demo,you can skip Login and try chat.")
- gr.Markdown("Duplicate this space and run for your own account: [chat_gpt_web](https://huggingface.co/spaces/dotmet/chatgpt_webui?duplicate=true).")
-
- if not ((email and password) or access_token or session_token):
- if not args.no_markdown:
- gr.Markdown("""
Login to OpenAI
""")
- with gr.Row():
- with gr.Group():
- method = gr.Dropdown(label="Login Method", choices=login_method)
- info = gr.Textbox(placeholder="email password/access_token/session_token", label="Login Information (choose login method first)")
- with gr.Row():
- login = gr.Button("Login")
- login.click(configure_chatbot, inputs=[method, info])
- else:
- if email and password:
- method = "Email/Password"
- info = email + " " + password
- elif access_token:
- method = "Access token"
- info = access_token
- elif session_token:
- method = "Session token"
- info = session_token
- configure_chatbot(method, info)
-
- if not args.no_markdown:
- gr.Markdown("""
-
-Our audio extractor converts and extracts audio from video files such as .avi, .mp4, .mov, .wmv, .flv, .mkv, .3gp, .asf and .webm, and from audio formats such as .m4a, .mp3, .wav, .m4b, .m4r and .wma.
-""")
-
-
-
diff --git a/spaces/erc/entity-referring-classifier/ercbcm/ERCBCM.py b/spaces/erc/entity-referring-classifier/ercbcm/ERCBCM.py
deleted file mode 100644
index 17fb5b5e6ac1370e81e2ae9c6ec203e2eb99f86c..0000000000000000000000000000000000000000
--- a/spaces/erc/entity-referring-classifier/ercbcm/ERCBCM.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from torch import nn
-from transformers import BertForSequenceClassification
-
-class ERCBCM(nn.Module):
-
- def __init__(self):
- super(ERCBCM, self).__init__()
- print('>>> ERCBCM Init!')
- self.bert_base = BertForSequenceClassification.from_pretrained('bert-base-uncased')
-
- def forward(self, text, label):
- loss, text_fea = self.bert_base(text, labels=label)[:2]
- return loss, text_fea
\ No newline at end of file
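A hedged usage sketch for this wrapper (the label semantics are assumed here: `1` meaning "the entity is being addressed" is an illustration, not something stated in the file):

```python
import torch
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = ERCBCM()

encoded = tokenizer("Alexa, what's the weather like?", return_tensors='pt')
labels = torch.tensor([1])                        # assumed: 1 = the entity is being addressed
loss, logits = model(encoded['input_ids'], labels)
print(loss.item(), logits.shape)                  # logits has shape (1, 2) for the two classes
```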
diff --git a/spaces/evaluate-metric/meteor/README.md b/spaces/evaluate-metric/meteor/README.md
deleted file mode 100644
index 0b234f70f1816a9084d4f8898ac7db20eead0a90..0000000000000000000000000000000000000000
--- a/spaces/evaluate-metric/meteor/README.md
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: METEOR
-emoji: 🤗
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-tags:
-- evaluate
-- metric
-description: >-
- METEOR, an automatic metric for machine translation evaluation
- that is based on a generalized concept of unigram matching between the
- machine-produced translation and human-produced reference translations.
- Unigrams can be matched based on their surface forms, stemmed forms,
- and meanings; furthermore, METEOR can be easily extended to include more
- advanced matching strategies. Once all generalized unigram matches
- between the two strings have been found, METEOR computes a score for
- this matching using a combination of unigram-precision, unigram-recall, and
- a measure of fragmentation that is designed to directly capture how
- well-ordered the matched words in the machine translation are in relation
- to the reference.
-
- METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
- data and 0.331 on the Chinese data. This is shown to be an improvement on
- using simply unigram-precision, unigram-recall and their harmonic F1
- combination.
----
-
-# Metric Card for METEOR
-
-## Metric description
-
-METEOR (Metric for Evaluation of Translation with Explicit ORdering) is a machine translation evaluation metric, which is calculated based on the harmonic mean of precision and recall, with recall weighted more than precision.
-
-METEOR is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference.
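In formula terms, the parameterized form of the score (the form controlled by the `alpha`, `beta` and `gamma` parameters described below) combines a weighted harmonic mean of unigram precision P and recall R with a fragmentation penalty; a sketch, not quoted from the original metric card:

$$
F_{mean} = \frac{P \cdot R}{\alpha P + (1 - \alpha) R},
\qquad
Pen = \gamma \left(\frac{\mathrm{chunks}}{\mathrm{matches}}\right)^{\beta},
\qquad
\mathrm{METEOR} = F_{mean} \cdot (1 - Pen)
$$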
-
-
-## How to use
-
-METEOR has two mandatory arguments:
-
-`predictions`: a `list` of predictions to score. Each prediction should be a string with tokens separated by spaces.
-
-`references`: a `list` of references (in the case of one `reference` per `prediction`), or a `list` of `lists` of references (in the case of multiple `references` per `prediction`). Each reference should be a string with tokens separated by spaces.
-
-It also has several optional parameters:
-
-`alpha`: Parameter for controlling relative weights of precision and recall. The default value is `0.9`.
-
-`beta`: Parameter for controlling shape of penalty as a function of fragmentation. The default value is `3`.
-
-`gamma`: The relative weight assigned to fragmentation penalty. The default is `0.5`.
-
-Refer to the [METEOR paper](https://aclanthology.org/W05-0909.pdf) for more information about parameter values and ranges.
-
-```python
->>> meteor = evaluate.load('meteor')
->>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
->>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
->>> results = meteor.compute(predictions=predictions, references=references)
-```
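A variant of the call above that passes the optional parameters explicitly (the values shown are just the documented defaults, included for illustration):

```python
>>> results = meteor.compute(predictions=predictions, references=references,
...                          alpha=0.9, beta=3, gamma=0.5)
>>> round(results['meteor'], 2)
```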
-
-## Output values
-
-The metric outputs a dictionary containing the METEOR score. Its values range from 0 to 1, e.g.:
-```
-{'meteor': 0.9999142661179699}
-```
-
-
-### Values from popular papers
-The [METEOR paper](https://aclanthology.org/W05-0909.pdf) does not report METEOR score values for different models, but it does report that METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data.
-
-
-## Examples
-
-One `reference` per `prediction`:
-
-```python
->>> meteor = evaluate.load('meteor')
->>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
->>> reference = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
->>> results = meteor.compute(predictions=predictions, references=reference)
->>> print(round(results['meteor'], 2))
-1.0
-```
-
-Multiple `references` per `prediction`:
-
-```python
->>> meteor = evaluate.load('meteor')
->>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
->>> references = [['It is a guide to action that ensures that the military will forever heed Party commands', 'It is the guiding principle which guarantees the military forces always being under the command of the Party', 'It is the practical guide for the army always to heed the directions of the party']]
->>> results = meteor.compute(predictions=predictions, references=references)
->>> print(round(results['meteor'], 2))
-1.0
-```
-
-Multiple `references` per `prediction`, partial match:
-
-```python
->>> meteor = evaluate.load('meteor')
->>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
->>> references = [['It is a guide to action that ensures that the military will forever heed Party commands', 'It is the guiding principle which guarantees the military forces always being under the command of the Party', 'It is the practical guide for the army always to heed the directions of the party']]
->>> results = meteor.compute(predictions=predictions, references=references)
->>> print(round(results['meteor'], 2))
-0.69
-```
-
-## Limitations and bias
-
-While the correlation between METEOR and human judgments was measured for Chinese and Arabic and found to be significant, further experimentation is needed to check its correlation for other languages.
-
-Furthermore, while the alignment and matching done in METEOR is based on unigrams, using multiple word entities (e.g. bigrams) could contribute to improving its accuracy -- this has been proposed in [more recent publications](https://www.cs.cmu.edu/~alavie/METEOR/pdf/meteor-naacl-2010.pdf) on the subject.
-
-
-## Citation
-
-```bibtex
-@inproceedings{banarjee2005,
- title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
- author = {Banerjee, Satanjeev and Lavie, Alon},
- booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
- month = jun,
- year = {2005},
- address = {Ann Arbor, Michigan},
- publisher = {Association for Computational Linguistics},
- url = {https://www.aclweb.org/anthology/W05-0909},
- pages = {65--72},
-}
-```
-
-## Further References
-- [METEOR -- Wikipedia](https://en.wikipedia.org/wiki/METEOR)
-- [METEOR score -- NLTK](https://www.nltk.org/_modules/nltk/translate/meteor_score.html)
-
diff --git a/spaces/evaluate-metric/perplexity/app.py b/spaces/evaluate-metric/perplexity/app.py
deleted file mode 100644
index cf4eee44294f2c28e9aea9adc0b7f8abedfb2e0b..0000000000000000000000000000000000000000
--- a/spaces/evaluate-metric/perplexity/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import evaluate
-from evaluate.utils import launch_gradio_widget
-
-
-module = evaluate.load("perplexity", module_type="metric")
-launch_gradio_widget(module)
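For reference, the module loaded above is normally queried like this; a hedged sketch based on the Hugging Face `evaluate` perplexity card (`gpt2` is just an example model id, and the output keys are as documented there):

```python
import evaluate

perplexity = evaluate.load("perplexity", module_type="metric")
results = perplexity.compute(model_id="gpt2",
                             predictions=["The quick brown fox jumps over the lazy dog."])
print(results["mean_perplexity"])   # per-sentence values are under results["perplexities"]
```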
diff --git a/spaces/facebook/XLS-R-2B-21-EN/README.md b/spaces/facebook/XLS-R-2B-21-EN/README.md
deleted file mode 100644
index 8ce0d37be5ef3f78b7efbf3cc9c704a823a81c2e..0000000000000000000000000000000000000000
--- a/spaces/facebook/XLS-R-2B-21-EN/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: XLS R 2B 22 EN
-emoji: 📊
-colorFrom: gray
-colorTo: red
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/fatiXbelha/sd/Como salvar fotos e vdeos do instagram no seu celular ou computador.md b/spaces/fatiXbelha/sd/Como salvar fotos e vdeos do instagram no seu celular ou computador.md
deleted file mode 100644
index 83329318ca3f49d37837f503aa6719db1d4e2545..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Como salvar fotos e vdeos do instagram no seu celular ou computador.md
+++ /dev/null
@@ -1,192 +0,0 @@
-
-
Baixar Download Instagram: Como Instalar e Usar o Aplicativo de Fotos e Vídeos Mais Popular do Mundo
-
Você já ouviu falar do Instagram? Se você ainda não conhece esse aplicativo incrível, está na hora de baixá-lo e descobrir tudo o que ele pode oferecer. O Instagram é uma rede social que permite que você compartilhe fotos, vídeos, stories e reels com os seus amigos, familiares e seguidores. Você também pode seguir outras pessoas, marcas e hashtags de seu interesse, curtir, comentar e compartilhar as publicações dos seus contatos, enviar e receber mensagens privadas, fazer compras online, assistir a vídeos longos no IGTV, explorar conteúdos variados na aba Explorar, entre outras funcionalidades.
-
Neste artigo, vamos te mostrar como baixar, instalar e usar o Instagram no seu celular, seja ele Android ou iOS. Você vai aprender como criar uma conta e configurar o seu perfil, como seguir e interagir com outros usuários, como publicar fotos, vídeos, stories e reels, e como aproveitar todos os benefícios que o Instagram pode te oferecer. Vamos lá?
O que é o Instagram e por que você deveria baixá-lo
-
O Instagram é um aplicativo gratuito de compartilhamento de fotos e vídeos que foi lançado em 2010. Desde então, ele se tornou uma das redes sociais mais populares do mundo, com mais de 1 bilhão de usuários ativos por mês. O Instagram pertence ao Meta (antigo Facebook), que comprou o aplicativo em 2012 por cerca de US$ 1 bilhão.
-
Mas por que você deveria baixar o Instagram? Aqui estão alguns motivos:
-
Os principais recursos do Instagram
-
O Instagram possui uma série de recursos que tornam o aplicativo divertido, interessante e útil. Alguns dos principais são:
-
-
Feed: é a tela principal do aplicativo, onde você pode ver as fotos e vídeos publicados pelas pessoas que você segue. Você pode curtir, comentar e compartilhar essas publicações com os seus amigos.
-
Stories: são conteúdos efêmeros que duram apenas 24 horas. Você pode criar stories com fotos, vídeos, textos, músicas, stickers, enquetes, filtros e efeitos. Você também pode ver os stories das pessoas que você segue na parte superior do feed.
-
Reels: são vídeos curtos e criativos que podem ter até 60 segundos. Você pode fazer reels com músicas, efeitos, transições, textos e outros recursos. Você também pode ver os reels de outros usuários na aba Reels ou na aba Explorar.
-
IGTV: é uma plataforma de vídeos longos que podem ter até 60 minutos. Você pode assistir aos vídeos do IGTV na aba IGTV ou no perfil dos usuários que publicam esse tipo de conteúdo.
-
Explorar: é uma aba que mostra conteúdos personalizados de acordo com os seus interesses e preferências. Você pode encontrar fotos, vídeos, stories, reels, IGTVs, lojas, hashtags e outros usuários para seguir.
-
Direct: é a ferramenta de mensagens privadas do Instagram. Você pode enviar e receber textos, fotos, vídeos, áudios, stickers, gifs e outros tipos de mídia. Você também pode criar grupos de conversa com até 32 pessoas.
-
Loja: é uma aba que permite que você faça compras online pelo Instagram. Você pode ver os produtos de diversas marcas e lojas, ver os preços, as descrições e as avaliações dos clientes. Você também pode finalizar a compra pelo próprio aplicativo ou pelo site do vendedor.
-
-
As vantagens de usar o Instagram
-
Além de todos esses recursos incríveis, o Instagram também oferece uma série de vantagens para os seus usuários. Veja algumas delas:
-
-
Entretenimento: o Instagram é uma ótima forma de se divertir e se distrair. Você pode ver conteúdos variados e interessantes, desde fotos de viagens e animais até vídeos de humor e música. Você também pode se expressar criativamente e mostrar a sua personalidade através das suas publicações.
-
Comunicação: o Instagram é uma excelente ferramenta de comunicação. Você pode manter contato com os seus amigos, familiares e seguidores, trocar mensagens, comentários e elogios. Você também pode conhecer novas pessoas que compartilham dos seus gostos e interesses.
-
Educação: o Instagram é uma fonte de aprendizado e informação. Você pode seguir perfis educativos que ensinam sobre diversos assuntos, desde idiomas e matemática até arte e história. Você também pode acompanhar as notícias e os acontecimentos do mundo através dos perfis jornalísticos.
-
Negócios: o Instagram é uma oportunidade de negócios. Você pode usar o aplicativo para divulgar o seu trabalho, o seu produto ou o seu serviço, alcançar mais clientes e aumentar as suas vendas. Você também pode fazer parcerias com outras marcas e influenciadores digitais para promover o seu negócio.
-
-
Como baixar e instalar o Instagram no seu celular
-
Agora que você já sabe o que é o Instagram e por que você deveria baixá-lo, vamos te ensinar como fazer isso no seu celular. O processo é simples e rápido, mas depende do sistema operacional do seu aparelho: Android ou iOS. Veja o passo a passo para cada um deles:
-
-
Step by step to download Instagram on Android
-
-
Open the Google Play Store, the official Android app store.
-
In the search field, type "Instagram" and tap the magnifying glass.
-
Tap the app icon (a colorful camera) in the list of results.
-
Tap the "Install" button and wait for the download to finish.
-
Tap the "Open" button to launch the app.
-
-
Step by step to download Instagram on iOS
-
-
Open the App Store, the official iOS app store.
-
No campo de busca, digite "Instagram mostrando quem você é, o que você faz, o que você oferece ou o que você busca no Instagram. Você pode usar emojis, hashtags, espaços e quebras de linha para deixar a sua bio mais organizada e atraente. Você pode mudar a sua bio a qualquer momento tocando no botão "Editar perfil" no seu perfil.
-
How to connect your Instagram to other social networks
-
Instagram lets you connect your profile to other social networks, such as Meta (formerly Facebook), Twitter, WhatsApp, TikTok, YouTube, and others. This makes it easy to share your Instagram posts on those networks, increasing your reach and visibility. It also helps you find and invite your friends from those networks to follow you on Instagram.
-
To connect your Instagram to other social networks, follow the steps below:
-
-
Tap the "Edit profile" button on your profile.
-
Tap "Personal information".
-
Tap "Linked accounts".
-
Choose the social network you want to connect and follow the on-screen instructions.
-
Repeat the process for each social network you want to connect.
-
-
How to start following and interacting with other users on Instagram
-
Now that you have set up your Instagram profile, it is time to start following and interacting with other users. Instagram is a social network built on relationships, so it is important to connect with the people who interest you and take part in the community. Here is how:
-
How to find and follow people, brands, and hashtags you are interested in
-
Instagram offers several ways to find and follow people, brands, and hashtags you are interested in. You can use the Explore tab, search, suggestions, or invitations. Here is how to use each one:
-
-
Explore tab: in this tab, you can see content personalized to your interests and preferences. You can tap any post to see more details, like it, comment, or share it. You can also tap the name of the user, brand, or hashtag to open its profile and tap the "Follow" button if you want to keep up with its posts.
-
Search: in this tab, you can type the name of the user, brand, or hashtag you want to find. You can use filters to refine the results by category, such as Accounts, Tags, or Places. You can also tap any result to open its profile and tap the "Follow" button if you want to keep up with its posts.
-
Suggestions: in this tab, you can see suggestions of people, brands, and hashtags to follow based on your interests, your connections, and your activity. You can tap any suggestion to open its profile and tap the "Follow" button if you want to keep up with its posts.
-
Invitations: in this tab, you can see invitations from people who want to follow you or who want you to follow them. You can accept or decline the invitations by tapping the corresponding buttons. You can also tap any invitation to see the person's profile and decide whether or not to follow them.
-
-
How to like, comment on, and share your followers' posts
-
Instagram lets you like, comment on, and share your followers' posts. These are ways to show your support, your opinion, or your interest in the published content. Here is how to do each one:
-
-
Like: to like a post, just double-tap the photo or video, or tap the heart icon below the post. The icon turns red and a number shows how many people liked the post. You can unlike a post by tapping the heart icon again.
-
Comment: to comment on a post, just tap the speech-bubble icon below the post. A text box appears where you can type your comment and tap the "Send" button. Your comment will appear below the post, alongside other people's comments. You can reply to, like, or delete your comment by tapping the corresponding icons.
-
Share: to share a post, just tap the paper-plane icon below the post. A menu appears where you can choose whether to send the post to someone via Direct, copy the post's link, share the post on another social network, or report the post for some reason.
-
-
How to send and receive private messages with Instagram Direct
-
Instagram Direct is the app's private messaging tool. You can use Direct to send and receive text, photos, videos, audio, stickers, GIFs, and other kinds of media. You can also create group conversations with up to 32 people. Here is how to use Direct:
-
-
Sending a message: to send a message, just tap the paper-plane icon in the top right corner of the app. A contact list appears where you can choose who to send the message to. You can also type the username in the search bar. After choosing the contact, tap the "Write message" button and type what you want to send. You can also tap the icons below the text box to send photos, videos, audio, stickers, GIFs, and other kinds of media.
-
Receiving a message: to receive a message, just tap the paper-plane icon in the top right corner of the app. A list of conversations appears where you can see the messages you have received. A red number shows how many unread messages you have. To open a conversation, just tap it. To reply to a message, just type in the text box or tap the icons below it.
-
-
How to post photos, videos, stories, and reels on Instagram
-
Instagram is a photo- and video-sharing app, so it is essential to know how to publish these kinds of content. Instagram offers four main ways to post: in the feed, in stories, in reels, and on IGTV. Each one has its own characteristics and features. Here is how to use each one:
-
How to take or choose a photo or video to post in the feed
-
The feed is the app's main screen, where you can see the photos and videos posted by the people you follow. You can also publish your own photos and videos in the feed by following the steps below:
-
-
Tap the plus (+) icon at the bottom of the app.
-
Choose whether you want to take a photo or video on the spot or pick one from your gallery.
-
If you want to take a photo or video on the spot, tap the round button to take a photo or hold it to record a video. You can use the side buttons to switch the camera (front or back), turn on the flash, or adjust the zoom.
-
If you want to pick a photo or video from your gallery, tap the one you want to post. You can select up to 10 photos or videos to make a carousel post.
-
Tap "Next".
-
-
How to edit and apply filters to your photos and videos
-
Instagram offers several editing options and filters to improve your photos and videos before you post them. You can use these options to adjust the brightness, contrast, saturation, warmth, crop, rotation, and other aspects of your images. You can also use filters to give your images a special, creative touch. Here is how:
-
-
After choosing the photo or video you want to post, tap "Next".
-
Tap the magic-wand icon to open the editing options. You can swipe across the screen to see the different options and tap the one you want to use. You can adjust the strength of the edit using the slider at the bottom of the screen. You can tap the arrow icon to undo or redo an edit.
-
Tap the filter icon to open the filters. You can swipe across the screen to see the different filters and tap the one you want to use. You can adjust the strength of the filter using the slider at the bottom of the screen. You can double-tap the filter to compare the before and after.
-
When you finish editing and applying filters to your photos or videos, tap "Next".
-
-
How to write a creative caption and use relevant hashtags
-
The caption is the text that goes with your photo or video in the feed. It is a way to complement, explain, or give context to your image, and to express your opinion, feeling, or mood. You can use emojis, hashtags, mentions, and locations to enrich your caption. Here is how:
-
-
After editing and applying filters to your photos or videos, tap "Next".
-
Tap the "Write a caption..." text box and type what you want to say about your image. You can use up to 2,200 characters, but keep in mind that the first lines are the most important, since they are the ones that show in the feed.
-
To use emojis, tap the smiley-face icon at the bottom of the keyboard and pick the emojis you want to use.
-
To use hashtags, type the # symbol followed by the word or phrase you want to use as a hashtag. Hashtags are a way to categorize your content and make it easier for other people to find your posts. You can use up to 30 hashtags per post, but it is best to use between 5 and 10 relevant, specific hashtags.
-
To mention other people, type the @ symbol followed by the username of the person you want to mention. Mentions are a way to tag or notify other people about your post. You can mention up to 20 people per post, but it is best to mention only the people who have something to do with your content.
-
To add a location, tap the "Add location" button and choose the place where you took the photo or video or where you are right now. Locations are a way to show where you are or where you have been, and they help other people find your posts through place search.
-
When you finish writing your caption, tap "Share".
-
-
How to create fun, interactive stories with stickers, music, and polls
-
Stories are ephemeral content that lasts only 24 hours. You can create stories with photos, videos, text, music, stickers, polls, filters, and effects. Stories are a way to show your day-to-day life, your behind-the-scenes, your tips, your opinions, and anything else you want to share with your followers. Here is how to create stories:
-
-
Tap the camera icon in the top left corner of the app or swipe from left to right on the main screen.
-
Choose whether you want to take a photo or record a video for your story. You can use the side buttons to switch the camera (front or back), turn on the flash, or adjust the zoom.
-
Tap the round button to take a photo or hold it to record a video. You can also swipe across the screen to choose one of the available modes: live, boomerang, layout, superzoom, hands-free, or rewind.
-
After taking the photo or recording the video, you can edit your story using the icons at the top of the screen. You can tap the smiley-face icon to add stickers, such as time, temperature, location, mention, hashtag, music, poll, question, countdown, and others. You can tap the Aa icon to add text and choose the font, color, and alignment. You can tap the pencil icon to draw on the screen and choose the color and brush type.
-
When you finish editing your story, you can tap the "Send to" button and choose who you want to send it to. You can send it to your close friends, to your followers, or to someone specific via Direct. You can also save your story to your gallery by tapping the down-arrow icon.
-
-
How to make short, fun reels with effects, music, and transitions
-
Reels are short, creative videos of up to 60 seconds. You can make reels with music, effects, transitions, text, and other features. Reels are a way to show your talent, your humor, your style, and anything else you want to share with your followers. Here is how to make reels:
-
-
Tap the plus (+) icon at the bottom of the app.
-
Tap "Reels" at the bottom of the screen.
-
Choose whether you want to record a video on the spot or pick a video from your gallery. You can use the side buttons to switch the camera (front or back), turn on the flash, or adjust the zoom.
-
Tap the round button to record a video or hold it to record several clips. You can also swipe across the screen to choose one of the available modes: audio, speed, effects, or timer.
-
After recording the video, you can edit your reel using the icons at the top of the screen. You can tap the audio icon to choose a song from Instagram's library or use your own audio. You can tap the speed icon to adjust the speed of your video. You can tap the effects icon to add filters and effects. You can tap the timer icon to set a time limit for recording each clip.
-
When you finish editing your reel, you can tap the "Next" button and write a caption for your video. You can also add hashtags, mentions, and locations to your caption, and choose whether to share your reel in the feed or only in the Reels tab.
-
Tap "Share" to publish your reel.
-
-
Conclusion: Download Instagram today and enjoy all of its benefits
-
In this article, you learned how to download, install, and use Instagram on your phone. You saw what Instagram is and why you should download it, how to create an account and set up your profile, how to follow and interact with other users, and how to post photos, videos, stories, and reels. Now you are ready to enjoy all the benefits Instagram has to offer.
-
Instagram is a fun, interesting, and useful app that lets you share your photos and videos with your friends, family, and followers. You can also follow other people, brands, and hashtags you are interested in, like, comment on, and share your contacts' posts, send and receive private messages, shop online, watch long videos on IGTV, and explore varied content in the Explore tab, among other features.
-
If you do not have Instagram on your phone yet, do not waste any more time and download the app today. You will be surprised by everything it has to offer. Instagram is a social network that connects you with the world, with people, and with the things you love. Download Instagram and have fun!
-
FAQs
-
Here are some frequently asked questions about Instagram and their answers:
-
Is Instagram free?
-
Yes, Instagram is a free app that you can download and use without paying anything. However, the app shows ads from other brands and products that may interest you. You can also shop online through Instagram, but in that case you have to pay for the products or services you buy.
-
Is Instagram safe?
-
Yes, Instagram is a safe app that protects your data and privacy. You can configure your security and privacy options in the app's settings, such as turning on two-factor authentication, blocking or reporting unwanted users, and restricting who can see or comment on your posts. You can also report any abusive, offensive, or inappropriate content or behavior you come across in the app.
-
Does Instagram have an age limit?
-
Yes, Instagram has a minimum age requirement to create an account and use the app. You must be at least 13 years old to sign up for Instagram. If you are under 13, you cannot use the app. If you are between 13 and 18, you need your parents' or guardians' permission to use the app.
-
Does Instagram limit how much you can post?
-
No, Instagram does not limit how many posts you can make per day or per hour. You can publish as many photos, videos, stories, and reels as you want, as long as they follow the app's community guidelines. That said, it is best to post in moderation and with quality, so you do not overwhelm your followers and can keep up your engagement.
-
Does Instagram limit how many followers you can have?
-
No, Instagram does not limit how many followers you can have or how many accounts you can follow. You can follow as many people, brands, and hashtags as you want, as long as they accept you or follow you back. You can also have as many followers as you want, as long as they are interested in your content and interact with you. That said, it is best to follow and be followed by relevant, real accounts, to avoid fake or inactive ones.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Incredibox ios and Start Mixing Your Own Beats.md b/spaces/fatiXbelha/sd/Download Incredibox ios and Start Mixing Your Own Beats.md
deleted file mode 100644
index 707666787eb361f7baf35136909058f422effa07..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Incredibox ios and Start Mixing Your Own Beats.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
How to Download Incredibox iOS and Create Your Own Music
-
Do you love music? Do you want to create your own music with the help of a merry crew of beatboxers? Do you want to have fun and learn at the same time? If you answered yes to any of these questions, then you should download Incredibox iOS, a music app that lets you do all that and more. In this article, we will tell you what Incredibox is, why you should download it, how to download it, and how to use it. Let's get started!
-
What is Incredibox?
-
A fun, interactive music app
-
Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose your musical style among 9 impressive atmospheres and start to lay down, record and share your mix. You can also let the automatic mode play for you and enjoy the show.
Incredibox is not only a music app, but also a music game and a music tool. It is part game, part tool, and part audio and visual experience. You can play with sounds, graphics, animation and interactivity, and learn about music genres, rhythms, melodies, harmonies, and more. You can also use Incredibox as a tool to create soundtracks, jingles, ringtones, or just to have fun.
-
A hit with people of all ages
-
Incredibox is a music app that has quickly become a hit with people of all ages. More than 70 million players worldwide have already enjoyed it. It has won several awards and appeared in various international media, including BBC, Adobe, FWA, Gizmodo, Slate, Konbini, Softonic, Kotaku, Cosmopolitan, PocketGamer, AppAdvice, AppSpy, Vice, Ultralinx and many others. It is also being used by schools all over the world as an educational tool.
-
Why Download Incredibox iOS?
-
Enjoy the full Incredibox experience
-
By downloading Incredibox iOS, you can enjoy the full Incredibox experience on your iPhone or iPad. You can access all the features and functions of the app without any limitations or interruptions. You can also enjoy high-quality graphics and sound on your device.
-
Access 9 musical atmospheres
-
By downloading Incredibox iOS, you can access 9 musical atmospheres that will suit your mood and taste. You can choose from hip-hop beats, electro waves, pop voices, jazzy swing, Brazilian rhythms and much more. Each atmosphere has its own style, characters, icons and sounds. You can mix and match them as you like.
-
Save, share and download your mix
-
By downloading Incredibox iOS, you can save, share and download your mix easily. Once your composition sounds great, just save it and you will get a link to your mix. You can share it with anybody so they can listen and even vote for it. If your mix gets enough votes from other users, you may join the Top 50 chart and become a legend. You can also download your mix as an MP3 file and listen to it offline. You can also create your own playlist with your favorite mixes.
-
How to Download Incredibox iOS?
-
Visit the App Store
-
To download Incredibox iOS, you need to visit the App Store on your iPhone or iPad. You can either use the search function or scan the QR code below to find the app.
-
-
-
Search for Incredibox
-
Once you are on the App Store, you need to search for Incredibox. You can type the name of the app in the search bar or use the voice command. You will see the app icon with a blue background and a white letter I. Tap on it to see more details.
-
Tap on the download button
-
Once you are on the app page, you will see the download button with a cloud and an arrow. Tap on it to start downloading the app. The app size is about 200 MB, so make sure you have enough space and a good internet connection. The download time may vary depending on your device and network speed. Once the download is complete, you will see the app icon on your home screen. Tap on it to open the app and start creating your own music.
-
How to Use Incredibox iOS?
-
Drag and drop icons onto the avatars
-
To use Incredibox iOS, you need to drag and drop icons onto the avatars to make them sing and beatbox. You can choose from different categories of icons, such as beats, effects, melodies, voices and choruses. Each icon has a different sound and color. You can drag up to 7 icons onto each avatar, creating a layer of sounds. You can also swap or remove icons by dragging them back to the bottom of the screen.
-
Find the right sound combos
-
To make your mix sound great, you need to find the right sound combos. You can experiment with different icons and avatars until you find a harmony that you like. You can also use the shuffle mode to let the app choose random icons for you. You can also use the mute mode to silence some avatars and focus on others. You can also use the solo mode to isolate one avatar and listen to its sound.
-
Unlock animated choruses
-
To add some spice to your mix, you can unlock animated choruses that will make your avatars dance and sing together. To unlock a chorus, you need to find a specific sound combo that matches the atmosphere you have chosen. You will see a hint at the top of the screen that will tell you how many icons you need to find. Once you find them, you will see a bonus icon appear at the bottom of the screen. Drag it onto an avatar to activate the chorus. You can unlock up to 4 choruses per atmosphere.
-
Conclusion
-
In this article, we have shown you how to download Incredibox iOS and create your own music with it. Incredibox is a fun, interactive music app that lets you mix and match sounds, graphics, animation and interactivity. It is also a music game and a music tool that teaches you about music genres, rhythms, melodies, harmonies, and more. It is a hit with people of all ages and has won several awards and media recognition. By downloading Incredibox iOS, you can enjoy the full Incredibox experience on your iPhone or iPad. You can access 9 musical atmospheres, save, share and download your mix, and unlock animated choruses. To download Incredibox iOS, you just need to visit the App Store, search for Incredibox, and tap on the download button. To use Incredibox iOS, you just need to drag and drop icons onto the avatars, find the right sound combos, and unlock animated choruses.
-
FAQs
-
How much does Incredibox iOS cost?
-
Incredibox iOS costs $4.99 on the App Store. It is a one-time purchase that gives you access to all features and functions of the app.
-
Is Incredibox iOS compatible with my device?
-
Incredibox iOS is compatible with iPhone 5s or later, iPad Air or later, iPad mini 2 or later, iPod touch (6th generation) or later, running iOS 10 or later.
-
Can I play Incredibox iOS offline?
-
Yes, you can play Incredibox iOS offline once you have downloaded the app. However, you will need an internet connection to save, share and download your mix, and to access the Top 50 chart.
-
How can I contact the Incredibox team?
-
If you have any questions, feedback, or issues with Incredibox iOS, you can contact the Incredibox team by email at contact@incredibox.com. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, YouTube, and TikTok.
-
What are some tips and tricks for Incredibox iOS?
-
Here are some tips and tricks for Incredibox iOS that will help you create amazing mixes:
-
-
Try different combinations of icons and avatars to find new sounds and effects.
-
Use the shuffle mode to discover random mixes and get inspired.
-
Use the mute mode to create breaks and variations in your mix.
-
Use the solo mode to focus on one sound and create a melody or a beat.
-
Use the bonus mode to activate animated choruses and add some spice to your mix.
-
Use the record mode to capture your mix and share it with the world.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download TikTok Wallpaper and Customize Your Screen with Amazing Videos.md b/spaces/fatiXbelha/sd/Download TikTok Wallpaper and Customize Your Screen with Amazing Videos.md
deleted file mode 100644
index 40f6c9991fdcde981a5d6486ed7c542f5f58aad5..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download TikTok Wallpaper and Customize Your Screen with Amazing Videos.md
+++ /dev/null
@@ -1,159 +0,0 @@
-
-
How to Download TikTok Wallpaper
TikTok is one of the most popular social media platforms in the world, with over 1 billion active users. It is a place where you can watch and create short videos on various topics, such as dance, comedy, music, art, and more. But did you know that you can also use TikTok videos as your wallpaper on your phone or tablet?
-
In this article, we will show you how to download TikTok wallpaper, which is a live wallpaper that plays your favorite TikTok video on your home screen or lock screen. We will also tell you how to join the latest TikTok wallpaper trends and share some tips and tricks for making your wallpaper look amazing.
TikTok wallpaper is a type of live wallpaper that uses a TikTok video as the background image for your device. Live wallpapers are dynamic wallpapers that change or move according to certain conditions, such as time, location, or touch. They can make your device more lively and personalized.
-
TikTok wallpaper allows you to turn any TikTok video into a live wallpaper, as long as the video creator allows downloads. You can choose from millions of videos on TikTok, ranging from funny pranks and cute animals to stunning landscapes and artistic creations. You can also use your own videos or videos from your friends and family.
-
Why You Should Use TikTok Wallpaper
-
There are many reasons why you should use TikTok wallpaper on your device. Here are some of them:
-
-
It is fun and easy to use. You can download and set up your TikTok wallpaper in minutes, using simple apps and tools.
-
It is unique and creative. You can express yourself and show off your personality with your TikTok wallpaper. You can also change it anytime you want, depending on your mood or preference.
-
It is engaging and interactive. You can enjoy watching your favorite TikTok video every time you unlock your device or swipe your screen. You can also tap or hold the screen to play or pause the video.
-
-
How to Find and Save TikTok Videos
-
The first step to download TikTok wallpaper is to find and save the TikTok video that you want to use as your wallpaper. There are two main ways to do this, depending on your device type.
-
Using TikTok Wall Picture App on Android
-
If you have an Android device, you can use the official app from TikTok called TikTok Wall Picture. This app lets you download any TikTok video as a live photo, which you can then set as your wallpaper. Here's how:
-
Using Live Photo Option on iPhone
If you have an iPhone device, you can use the built-in live photo option in the TikTok app to save the video as a live photo, which you can then set as your wallpaper. Here's how:
-
-
Open the TikTok app and find the video that you want to use as your wallpaper.
-
Tap the sharing icon (the curved arrow) at the bottom right corner of the video.
-
Scroll right and tap the live photo icon (the circle with three dots) in the second row of options.
-
This will save the video as a live photo in your Photos app.
-
-
How to Set TikTok Videos as Live Wallpaper
-
Once you have saved the TikTok video as a live photo, you can set it as your live wallpaper on your device. The process is slightly different for Android and iPhone devices.
-
On Android Devices
-
To set a TikTok video as your live wallpaper on an Android device, follow these steps:
-
-
Open the TikTok Wall Picture app and find the live photo that you want to use as your wallpaper.
-
Tap the set as wallpaper button (the square with an arrow) at the bottom of the screen.
-
Select whether you want to set it as your home screen, lock screen, or both.
-
Adjust the position and size of the live photo as you like.
-
Tap apply to confirm your choice.
-
-
On iPhone Devices
-
To set a TikTok video as your live wallpaper on an iPhone device, follow these steps:
-
-
Open the Photos app and find the live photo that you want to use as your wallpaper.
-
Tap the share button (the box with an arrow) at the bottom left corner of the screen.
-
Scroll down and tap use as wallpaper.
-
Select whether you want to set it as your home screen, lock screen, or both.
-
Adjust the position and size of the live photo as you like.
-
Tap set to confirm your choice.
-
-
How to Join TikTok Wallpaper Trends
-
TikTok wallpaper is not only a way to personalize your device, but also a way to join some of the latest trends on TikTok. Many TikTok users are creating and sharing their own wallpapers using various apps and websites. Here are some of them:
-
Using Dream by Wombo App
-
Dream by Wombo is an app that lets you create animated wallpapers using artificial intelligence. You can upload any photo or video and choose from different effects and filters. The app will then generate a realistic animation that you can save and set as your wallpaper. Some of the popular effects are fire, water, smoke, and neon. You can also use this app to make your own videos for TikTok.
-
Using Other Apps and Websites
-
Besides Dream by Wombo, there are many other apps and websites that you can use to create and download TikTok wallpapers. Some of them are:
-
-
TikTok Wallpapers HD 4K: This website offers a collection of high-quality wallpapers featuring TikTok stars, logos, icons, and quotes. You can browse by category or search by keyword. You can also upload your own wallpapers and share them with other users.
-
Tik Tok Video Downloader - SnapTik.App: This website allows you to download any TikTok video without watermark. You can also convert it to MP3 or GIF format. You can then use these files as your wallpaper or share them on other platforms.
-
Tik Tok Live Photo - Live Wallpapers for Tik Tok: This app lets you turn any TikTok video into a live photo with sound. You can also edit the video by trimming, cropping, adding stickers, text, or music. You can then set it as your wallpaper or share it on social media.
styles. You can also create your own wallpapers by adding text, stickers, filters, or effects.
-
-
Tips and Tricks for TikTok Wallpaper
-
Now that you know how to download and set TikTok wallpaper, you might want to know some tips and tricks to make your wallpaper look better and last longer. Here are some of them:
-
How to Optimize Battery Life
-
One of the drawbacks of using live wallpapers is that they can drain your battery faster than static wallpapers. This is because they use more resources and data to run. To optimize your battery life, you can do the following:
-
-
Reduce the brightness of your screen.
-
Turn off the sound of your live wallpaper.
-
Use a shorter video or a lower resolution for your live wallpaper.
-
Disable the live wallpaper when you are not using your device.
-
-
How to Customize Your Wallpaper Style
-
Another way to make your TikTok wallpaper more appealing is to customize its style according to your taste and preference. You can do this by using some of the features and options available in the apps and websites that you use to create and download your wallpaper. For example, you can:
-
-
Crop, rotate, or zoom in or out of the video.
-
Add text, stickers, filters, or effects to the video.
-
Change the speed, direction, or loop of the video.
-
Mix and match different videos or images to create a collage or a slideshow.
-
-
Conclusion
-
TikTok wallpaper is a fun and easy way to personalize your device and join the latest trends on TikTok. You can use any TikTok video as your live wallpaper, as long as you have the permission from the video creator. You can also create your own wallpapers using various apps and websites that offer different effects and filters. To make your wallpaper look better and last longer, you can follow some tips and tricks to optimize your battery life and customize your wallpaper style.
-
If you are looking for a new and exciting way to spice up your device, why not try TikTok wallpaper? You will be amazed by how much it can transform your device and make it more lively and interactive. You will also be able to express yourself and show off your personality with your TikTok wallpaper. So what are you waiting for? Download TikTok wallpaper today and enjoy watching your favorite TikTok video every time you use your device!
-
Summary of Main Points
-
In this article, we have covered the following points:
-
-
TikTok wallpaper is a live wallpaper that uses a TikTok video as the background image for your device.
-
You can download any TikTok video as a live photo using the official app from TikTok called TikTok Wall Picture on Android devices or the built-in live photo option on iPhone devices.
-
You can set any live photo as your live wallpaper using the TikTok Wall Picture app on Android devices or the Photos app on iPhone devices.
-
You can join some of the latest TikTok wallpaper trends using various apps and websites that let you create animated wallpapers using artificial intelligence or download high-quality wallpapers featuring TikTok stars, logos, icons, and quotes.
-
You can optimize your battery life by reducing the brightness of your screen, turning off the sound of your live wallpaper, using a shorter video or a lower resolution for your live wallpaper, or disabling the live wallpaper when you are not using your device.
-
You can customize your wallpaper style by cropping, rotating, or zooming in or out of the video, adding text, stickers, filters, or effects to the video, changing the speed, direction, or loop of the video, or mixing and matching different videos or images to create a collage or a slideshow.
-
-
Call to Action
-
We hope that this article has helped you learn how to download TikTok wallpaper and make your device more fun and personalized. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you!
-
If you liked this article, please share it with your friends and family who might be interested in TikTok wallpaper. You can also subscribe to our newsletter for more articles like this one. Thank you for reading!
-
Frequently Asked Questions
-
Here are some of the frequently asked questions about TikTok wallpaper:
-
Can I use any TikTok video as my wallpaper?
-
Yes, you can use any TikTok video as your wallpaper, as long as the video creator allows downloads. You can check this by tapping the sharing icon (the curved arrow) at the bottom right corner of the video. If you see the live photo icon (the robot head or the circle with three dots) in the second row of options, it means that you can download the video as a live photo and use it as your wallpaper.
-
How do I change or remove my TikTok wallpaper?
-
To change or remove your TikTok wallpaper, you can follow the same steps that you used to set it up, but choose a different live photo or a static wallpaper instead. You can also use the settings app on your device to change or remove your wallpaper.
-
Does TikTok wallpaper work on all devices?
-
TikTok wallpaper works on most Android and iPhone devices that support live wallpapers. However, some older or low-end devices may not be compatible with live wallpapers or may experience performance issues. You can check the compatibility and requirements of the apps and websites that you use to create and download your wallpaper before installing or using them.
-
Is TikTok wallpaper safe and legal?
-
TikTok wallpaper is safe and legal, as long as you respect the rights and privacy of the video creators and follow the terms and conditions of the apps and websites that you use to create and download your wallpaper. You should not use any videos that contain inappropriate, offensive, or copyrighted content as your wallpaper. You should also not use any videos that are not allowed for downloads by the video creators.
-
Where can I find more TikTok wallpaper ideas and inspiration?
-
You can find more TikTok wallpaper ideas and inspiration by browsing through the millions of videos on TikTok, using hashtags such as #tiktokwallpaper, #livewallpaper, #dreambywombo, and #tiktoktrends. You can also follow some of the popular TikTok users who create and share their own wallpapers, such as @wallpaperbywombo, @livewallpapercreator, @tiktokwallpapershd4k, and @tiktokwallpaperfan.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Dream League Soccer 2021 Mod APK Unlimited Money and Coins Hack Download.md b/spaces/fatiXbelha/sd/Dream League Soccer 2021 Mod APK Unlimited Money and Coins Hack Download.md
deleted file mode 100644
index c19c5db3d2af598d2b5c5401b058a1c612ced1dc..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Dream League Soccer 2021 Mod APK Unlimited Money and Coins Hack Download.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
DLS 21 Hack Mod APK Download Unlimited Money
-
Do you love playing soccer games on your mobile device? If yes, then you might have heard of DLS 21, or Dream League Soccer 2021, one of the most popular soccer games for Android and iOS devices. In this article, we will tell you everything you need to know about DLS 21, and how you can download and install a hack mod apk that will give you unlimited money in the game. Read on to find out more.
-
What is DLS 21?
-
DLS 21 is a soccer game developed by First Touch Games, a UK-based studio that specializes in sports games. DLS 21 is the latest installment in the Dream League Soccer series, which has been downloaded over 500 million times on Google Play Store alone. DLS 21 allows you to create and manage your own soccer team, from choosing the players, the kits, the stadium, and the tactics. You can also compete with other players online in various modes, such as Career Mode, Online Mode, and Friendly Matches.
Some of the features that make DLS 21 stand out from other soccer games are:
-
-
Realistic graphics and animations that bring the game to life.
-
Over 4,000 licensed players from around the world, including Lionel Messi, Cristiano Ronaldo, Neymar Jr., and more.
-
Customizable team management, with options to upgrade your stadium, sign new players, train your squad, and adjust your formation and style.
-
Challenging gameplay that tests your skills and strategy against different opponents and scenarios.
-
Online multiplayer mode that lets you play with or against your friends and other players from around the world.
-
Regular updates that add new features, events, and content to the game.
-
-
How to play DLS 21
-
To play DLS 21, you need to download and install the game from Google Play Store or Apple App Store. The game is free to play, but it contains in-app purchases that allow you to buy coins and gems, which are the currency used in the game. You can use coins and gems to buy new players, upgrade your stadium, unlock new kits, and more. You can also earn coins and gems by playing matches, completing achievements, and watching ads.
-
The game has a simple and intuitive control system that lets you move your players, pass the ball, shoot, tackle, and perform skills with just a few taps and swipes. You can also customize your controls according to your preference. The game has different difficulty levels that suit different players' abilities and preferences. You can also adjust the camera angle and zoom level to get a better view of the action.
-
Why do you need DLS 21 hack mod apk?
-
As much as DLS 21 is fun and addictive, it can also be frustrating and time-consuming if you want to progress faster and build your dream team. The game requires a lot of coins and gems to unlock new players, upgrade your stadium, buy new kits, and more. However, earning coins and gems is not easy, as they are limited and scarce in the game. You can either spend real money to buy them from the in-app store or use a hack mod apk that will give you unlimited money in the game.
-
Benefits of DLS 21 hack mod apk
-
A hack mod apk is a modified version of the original game that has been altered to give you some advantages or features that are not available in the official version. Some of the benefits of using a DLS 21 hack mod apk are:
-
-
You can get unlimited coins and gems in the game, which you can use to buy anything you want, such as new players, kits, stadiums, and more.
-
You can unlock all the features and modes in the game, such as Career Mode, Online Mode, and Friendly Matches.
-
You can enjoy the game without any ads or interruptions.
-
You can customize your team and players according to your liking, without any restrictions or limitations.
-
You can have more fun and excitement in the game, as you can experiment with different strategies and tactics.
-
-
Risks of DLS 21 hack mod apk
-
However, using a DLS 21 hack mod apk also comes with some risks and drawbacks that you should be aware of before downloading and installing it. Some of the risks of using a DLS 21 hack mod apk are:
-
-
You may violate the terms and conditions of the game, which could result in your account being banned or suspended by the developers.
-
You may expose your device to malware and viruses that could harm your data and privacy.
-
You may lose your progress and data in the game if the hack mod apk is not compatible with the latest version of the game or if it crashes or glitches.
-
You may ruin the fun and challenge of the game, as you will not have to work hard or earn anything in the game.
-
You may face legal issues or penalties if you are caught using a hack mod apk by the authorities or the game developers.
-
-
How to download and install DLS 21 hack mod apk?
-
If you still want to download and install a DLS 21 hack mod apk, despite the risks involved, you need to follow some steps carefully. Here are the steps to download and install a DLS 21 hack mod apk:
-
-
Steps to download and install DLS 21 hack mod apk
-
-
Find a reliable and trustworthy website that offers a DLS 21 hack mod apk. You can search on Google or use recommendations from other users. Make sure to read the reviews and ratings of the website and the hack mod apk before downloading it.
-
Download the DLS 21 hack mod apk file from the website. You may need to complete some surveys or verification tasks to access the download link. Be careful not to click on any suspicious or malicious links or ads that could infect your device.
-
Enable the installation of unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps that are not from Google Play Store or Apple App Store.
-
Locate the DLS 21 hack mod apk file on your device's file manager or downloads folder. Tap on it to start the installation process. Follow the instructions on the screen to complete the installation.
-
Launch the DLS 21 hack mod apk from your device's app drawer or home screen. Enjoy playing DLS 21 with unlimited money and features.
-
-
Tips to avoid malware and viruses
-
To avoid malware and viruses that could harm your device and data, here are some tips that you should follow when downloading and installing a DLS 21 hack mod apk:
-
-
Use antivirus or anti-malware software on your device. Scan your device regularly and remove any threats that are detected.
-
Backup your data and progress in the game. You can use cloud storage services or external devices to store your data safely. This way, you can restore your data if anything goes wrong with the hack mod apk.
-
Do not use public Wi-Fi networks or unsecured connections when downloading or installing a hack mod apk. Use a VPN service or a secure connection to protect your privacy and security.
-
Do not share your personal information or credentials with anyone online. Do not enter your email, password, phone number, bank details, or any other sensitive information on any website or app that offers a hack mod apk.
-
Do not download or install multiple hack mod apks on your device. This could cause conflicts and errors in your device's performance and functionality. Choose one hack mod apk that suits your needs and preferences.
-
-
Conclusion
-
DLS 21 is a soccer game that lets you create and manage your own soccer team, compete with other players online, and enjoy realistic graphics and gameplay. However, if you want unlimited money and features in the game, you may need to use a hack mod apk that gives you these advantages. Using a hack mod apk also comes with risks and drawbacks, such as violating the game's terms and conditions, exposing your device to malware and viruses, losing your progress and data, ruining the fun and challenge of the game, and facing legal issues or penalties. Therefore, you should be careful and responsible when downloading and installing a hack mod apk. You should also follow some tips to avoid malware and viruses, such as using antivirus software, backing up your data, using a secure connection, not sharing your personal information, and not downloading multiple hack mod apks.
-
We hope this article has helped you understand more about DLS 21 and how to download and install a hack mod apk that will give you unlimited money in the game. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading.
-
FAQs
-
Here are some frequently asked questions about DLS 21 and hack mod apk:
-
-
Q: Is DLS 21 hack mod apk safe to use?
-
A: There is no definitive answer to this question, as different hack mod apks may have different levels of safety and quality. Some hack mod apks may be safe and reliable, while others may be harmful and malicious. Therefore, you should always do your research and use your discretion before downloading and installing any hack mod apk.
-
Q: Can I play DLS 21 online with a hack mod apk?
-
A: It depends on the type of hack mod apk you are using. Some hack mod apks may allow you to play online with other players who are also using the same hack mod apk, while others may not. However, you should be aware that playing online with a hack mod apk may result in your account being banned or suspended by the game developers, as it is considered cheating and unfair.
-
Q: How can I update DLS 21 hack mod apk?
-
A: To update DLS 21 hack mod apk, you need to download and install the latest version of the hack mod apk from the same website or source that you downloaded it from. You may also need to uninstall the previous version of the hack mod apk before installing the new one. However, you should be careful not to lose your data or progress in the game when updating the hack mod apk.
-
Q: What are some alternatives to DLS 21 hack mod apk?
-
A: If you do not want to use a hack mod apk for DLS 21, you can still enjoy the game without it. You can try some alternatives, such as:
-
-
Using legit ways to earn coins and gems in the game, such as playing matches, completing achievements, and watching ads.
-
Using third-party apps or websites that offer free coins and gems for DLS 21, such as reward apps, survey apps, or gift card apps. However, you should be careful not to fall for scams or frauds that may ask for your personal information or money.
-
Using cheats or tricks that do not require a hack mod apk, such as changing the date and time settings on your device, clearing the cache and data of the game, or using a VPN service. However, these methods may not work for everyone or every time.
-
-
Q: Where can I find more information about DLS 21 and hack mod apk?
-
A: You can find more information about DLS 21 and hack mod apk on various sources online, such as:
-
-
The official website of DLS 21: https://www.firsttouchgames.com/dls-2021/
-
The official social media pages of DLS 21: https://www.facebook.com/dreamleaguesoccer/, https://twitter.com/firsttouchgames/, https://www.instagram.com/firsttouchgames/
-
The official YouTube channel of DLS 21: https://www.youtube.com/channel/UC5Y9Zg9z4y7XK8wZwvjl6Sw
-
The official Google Play Store page of DLS 21: https://play.google.com/store/apps/details?id=com.firsttouchgames.dls7&hl=en_US&gl=US
-
The official Apple App Store page of DLS 21: https://apps.apple.com/us/app/dream-league-soccer-2021/id1462911602
-
The official Reddit community of DLS 21: https://www.reddit.com/r/DreamLeagueSoccer/
-
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatmacankara/ASCARIS/code/manage_files.py b/spaces/fatmacankara/ASCARIS/code/manage_files.py
deleted file mode 100644
index 0937683122f2cdd754a04a74cb2a5415e3198f9e..0000000000000000000000000000000000000000
--- a/spaces/fatmacankara/ASCARIS/code/manage_files.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-from pathlib import Path
-def manage_files(mode):
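- """Create the output folder layout and return the input/output paths used downstream.
-
- mode 1: paths for PDB / SwissModel / ModBase based runs (under out_files/pdb);
- mode 2: paths for AlphaFold based runs (under out_files/alphafold).
- """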
- if mode == 1:
- path_to_input_files = Path('input_files')
- path_to_domains = path_to_input_files / 'domains.txt'
- swiss_model_path = path_to_input_files / 'INDEX.json'
- fisher_path = path_to_input_files / 'significant_domains.txt'
- path_to_interfaces = path_to_input_files / 'H_sapiens_interfacesHQ.txt'
-
- os.makedirs('out_files', exist_ok=True)
- path_to_output_files = Path('out_files/pdb')
- os.makedirs(path_to_output_files / 'pdb_structures/', exist_ok=True)
- os.makedirs(path_to_output_files / 'alignment_files/', exist_ok=True)
- os.makedirs(path_to_output_files / 'swissmodel_structures/', exist_ok=True)
- os.makedirs(path_to_output_files / 'modbase_structures/', exist_ok=True)
- os.makedirs(path_to_output_files / 'modbase_structures_individual/', exist_ok=True)
- os.makedirs(path_to_output_files / 'freesasa_files/', exist_ok=True)
- os.makedirs(path_to_output_files / '3D_alignment/', exist_ok=True)
- path_to_alignment_files = path_to_output_files / 'alignment_files'
- path_3D_alignment = path_to_output_files / '3D_alignment'
- path_to_freesasa = path_to_output_files / 'freesasa_files'
- buffer = path_to_output_files / 'file_buffer.txt'
- outpath = path_to_output_files / 'feature_vector.txt'
-
- return path_to_input_files, path_to_output_files, path_to_domains,fisher_path, path_to_interfaces, buffer
-
- elif mode == 2:
- path_to_input_files = Path('input_files')
- path_to_domains = path_to_input_files / 'domains.txt'
- fisher_path = path_to_input_files / 'significant_domains.txt'
- alphafold_summary = path_to_input_files / 'alphafold_summary.txt'
- path_to_interfaces = path_to_input_files / 'H_sapiens_interfacesHQ.txt'
- # Unzip before using
- alphafold_path = Path(path_to_input_files/'alphafold_structures')
-
- os.makedirs('out_files', exist_ok=True)
- path_to_output_files = Path('out_files/alphafold')
- os.makedirs(path_to_output_files, exist_ok=True)
- os.makedirs(path_to_output_files / 'freesasa_files', exist_ok=True)
- os.makedirs(path_to_output_files / 'alignment_files', exist_ok=True)
- os.makedirs(path_to_output_files / '3D_alignment', exist_ok=True)
-
- return path_to_input_files,path_to_output_files, path_to_domains, fisher_path, path_to_interfaces, alphafold_path, alphafold_summary
diff --git a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/voicebank_demand.py b/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/voicebank_demand.py
deleted file mode 100644
index 7041596cdc9b36585c119b582176e5690c9930e7..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Music_Source_Separation/bytesep/callbacks/voicebank_demand.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import logging
-import os
-import time
-from typing import List, NoReturn
-
-import librosa
-import numpy as np
-import pysepm
-import pytorch_lightning as pl
-import torch.nn as nn
-from pesq import pesq
-from pytorch_lightning.utilities import rank_zero_only
-
-from bytesep.callbacks.base_callbacks import SaveCheckpointsCallback
-from bytesep.inference import Separator
-from bytesep.utils import StatisticsContainer, read_yaml
-
-
-def get_voicebank_demand_callbacks(
- config_yaml: str,
- workspace: str,
- checkpoints_dir: str,
- statistics_path: str,
- logger: pl.loggers.TensorBoardLogger,
- model: nn.Module,
- evaluate_device: str,
-) -> List[pl.Callback]:
- """Get Voicebank-Demand callbacks of a config yaml.
-
- Args:
- config_yaml: str
- workspace: str
- checkpoints_dir: str, directory to save checkpoints
- statistics_path: str, path to save statistics
- logger: pl.loggers.TensorBoardLogger
- model: nn.Module
- evaluate_device: str
-
- Return:
- callbacks: List[pl.Callback]
- """
- configs = read_yaml(config_yaml)
- task_name = configs['task_name']
- target_source_types = configs['train']['target_source_types']
- input_channels = configs['train']['channels']
- evaluation_audios_dir = os.path.join(workspace, "evaluation_audios", task_name)
- sample_rate = configs['train']['sample_rate']
- evaluate_step_frequency = configs['train']['evaluate_step_frequency']
- save_step_frequency = configs['train']['save_step_frequency']
- test_batch_size = configs['evaluate']['batch_size']
- test_segment_seconds = configs['evaluate']['segment_seconds']
-
- test_segment_samples = int(test_segment_seconds * sample_rate)
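- # Voicebank-Demand is a single-target task: the only separation target is speech.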
- assert len(target_source_types) == 1
- target_source_type = target_source_types[0]
- assert target_source_type == 'speech'
-
- # save checkpoint callback
- save_checkpoints_callback = SaveCheckpointsCallback(
- model=model,
- checkpoints_dir=checkpoints_dir,
- save_step_frequency=save_step_frequency,
- )
-
- # statistics container
- statistics_container = StatisticsContainer(statistics_path)
-
- # evaluation callback
- evaluate_test_callback = EvaluationCallback(
- model=model,
- input_channels=input_channels,
- sample_rate=sample_rate,
- evaluation_audios_dir=evaluation_audios_dir,
- segment_samples=test_segment_samples,
- batch_size=test_batch_size,
- device=evaluate_device,
- evaluate_step_frequency=evaluate_step_frequency,
- logger=logger,
- statistics_container=statistics_container,
- )
-
- callbacks = [save_checkpoints_callback, evaluate_test_callback]
-
- return callbacks
-
-
-class EvaluationCallback(pl.Callback):
- def __init__(
- self,
- model: nn.Module,
- input_channels: int,
- evaluation_audios_dir,
- sample_rate: int,
- segment_samples: int,
- batch_size: int,
- device: str,
- evaluate_step_frequency: int,
- logger: pl.loggers.TensorBoardLogger,
- statistics_container: StatisticsContainer,
- ):
- r"""Callback to evaluate every #save_step_frequency steps.
-
- Args:
- model: nn.Module
- input_channels: int
- evaluation_audios_dir: str, directory containing audios for evaluation
- sample_rate: int
- segment_samples: int, length of segments to be input to a model, e.g., 44100*30
- batch_size, int, e.g., 12
- device: str, e.g., 'cuda'
- evaluate_step_frequency: int, evaluate every #evaluate_step_frequency steps
- logger: pl.loggers.TensorBoardLogger
- statistics_container: StatisticsContainer
- """
- self.model = model
- self.mono = True
- self.sample_rate = sample_rate
- self.segment_samples = segment_samples
- self.evaluate_step_frequency = evaluate_step_frequency
- self.logger = logger
- self.statistics_container = statistics_container
-
- self.clean_dir = os.path.join(evaluation_audios_dir, "clean_testset_wav")
- self.noisy_dir = os.path.join(evaluation_audios_dir, "noisy_testset_wav")
-
- self.EVALUATION_SAMPLE_RATE = 16000 # Evaluation sample rate of the
- # Voicebank-Demand task.
-
- # separator
- self.separator = Separator(model, self.segment_samples, batch_size, device)
-
- @rank_zero_only
- def on_batch_end(self, trainer: pl.Trainer, _) -> NoReturn:
- r"""Evaluate losses on a few mini-batches. Losses are only used for
- observing training, and are not final F1 metrics.
- """
-
- global_step = trainer.global_step
-
- if global_step % self.evaluate_step_frequency == 0:
-
- audio_names = sorted(
- [
- audio_name
- for audio_name in sorted(os.listdir(self.clean_dir))
- if audio_name.endswith('.wav')
- ]
- )
-
- error_str = "Directory {} does not contain audios for evaluation!".format(
- self.clean_dir
- )
- assert len(audio_names) > 0, error_str
-
- pesqs, csigs, cbaks, covls, ssnrs = [], [], [], [], []
-
- logging.info("--- Step {} ---".format(global_step))
- logging.info("Total {} pieces for evaluation:".format(len(audio_names)))
-
- eval_time = time.time()
-
- for n, audio_name in enumerate(audio_names):
-
- # Load audio.
- clean_path = os.path.join(self.clean_dir, audio_name)
- mixture_path = os.path.join(self.noisy_dir, audio_name)
-
- mixture, _ = librosa.core.load(
- mixture_path, sr=self.sample_rate, mono=self.mono
- )
-
- if mixture.ndim == 1:
- mixture = mixture[None, :]
- # (channels_num, audio_length)
-
- # Separate.
- input_dict = {'waveform': mixture}
-
- sep_wav = self.separator.separate(input_dict)
- # (channels_num, audio_length)
-
- # Target
- clean, _ = librosa.core.load(
- clean_path, sr=self.EVALUATION_SAMPLE_RATE, mono=self.mono
- )
-
- # to mono
- sep_wav = np.squeeze(sep_wav)
-
- # Resample for evaluation.
- sep_wav = librosa.resample(
- sep_wav,
- orig_sr=self.sample_rate,
- target_sr=self.EVALUATION_SAMPLE_RATE,
- )
-
- sep_wav = librosa.util.fix_length(sep_wav, size=len(clean), axis=0)
- # (audio_length,)
-
- # Evaluate metrics
- pesq_ = pesq(self.EVALUATION_SAMPLE_RATE, clean, sep_wav, 'wb')
-
- (csig, cbak, covl) = pysepm.composite(
- clean, sep_wav, self.EVALUATION_SAMPLE_RATE
- )
-
- ssnr = pysepm.SNRseg(clean, sep_wav, self.EVALUATION_SAMPLE_RATE)
-
- pesqs.append(pesq_)
- csigs.append(csig)
- cbaks.append(cbak)
- covls.append(covl)
- ssnrs.append(ssnr)
- print(
- '{}, {}, PESQ: {:.3f}, CSIG: {:.3f}, CBAK: {:.3f}, COVL: {:.3f}, SSNR: {:.3f}'.format(
- n, audio_name, pesq_, csig, cbak, covl, ssnr
- )
- )
-
- logging.info("-----------------------------")
- logging.info('Avg PESQ: {:.3f}'.format(np.mean(pesqs)))
- logging.info('Avg CSIG: {:.3f}'.format(np.mean(csigs)))
- logging.info('Avg CBAK: {:.3f}'.format(np.mean(cbaks)))
- logging.info('Avg COVL: {:.3f}'.format(np.mean(covls)))
- logging.info('Avg SSNR: {:.3f}'.format(np.mean(ssnrs)))
-
- logging.info("Evlauation time: {:.3f}".format(time.time() - eval_time))
-
- statistics = {"pesq": np.mean(pesqs)}
- self.statistics_container.append(global_step, statistics, 'test')
- self.statistics_container.dump()
diff --git a/spaces/fittar/ViPE/README.md b/spaces/fittar/ViPE/README.md
deleted file mode 100644
index ed9394567823d4cc0a1ba65d815dce3af8af4ed2..0000000000000000000000000000000000000000
--- a/spaces/fittar/ViPE/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: ViPE
-emoji: 👀
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.47.1
-app_file: app.py
-pinned: false
-license: mit
----
diff --git a/spaces/fmind/resume/database.py b/spaces/fmind/resume/database.py
deleted file mode 100644
index 3491d0baf705fd1fe6af375d1f6514eeed24a80b..0000000000000000000000000000000000000000
--- a/spaces/fmind/resume/database.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-"""Manage the project database."""
-
-# %% IMPORTS
-
-import argparse
-import logging
-import re
-import sys
-import typing as T
-
-import lib
-
-# %% LOGGING
-
-logging.basicConfig(
- level=logging.DEBUG,
- format="[%(asctime)s][%(levelname)s] %(message)s",
-)
-
-# %% PARSING
-
-PARSER = argparse.ArgumentParser(description=__doc__)
-PARSER.add_argument("files", type=argparse.FileType("r"), nargs="+")
-PARSER.add_argument("--database", type=str, default=lib.DATABASE_PATH)
-PARSER.add_argument("--collection", type=str, default=lib.DATABASE_COLLECTION)
-
-# %% FUNCTIONS
-
-
-def segment_text(text: str, pattern: str) -> T.Iterator[tuple[str, str]]:
- """Segment the text in title and content pair by pattern."""
- splits = re.split(pattern, text, flags=re.MULTILINE)
- pairs = zip(splits[1::2], splits[2::2])
- return pairs
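-
-# Note on segment_text: with a capturing pattern such as r"^# (.+)", re.split
-# returns [preamble, title1, body1, title2, body2, ...], so splits[1::2] are the
-# titles and splits[2::2] the matching bodies. For example (hypothetical input),
-# segment_text("# A\n\ntext", r"^# (.+)") yields [("A", "\n\ntext")].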
-
-
-def import_file(
- file: T.TextIO,
- collection: lib.Collection,
- encoding_function: T.Callable,
- max_output_tokens: int = lib.ENCODING_OUTPUT_LIMIT,
-) -> tuple[int, int]:
- """Import a markdown file to a database collection."""
- n_chars = 0
- n_tokens = 0
- text = file.read()
- filename = file.name
- segments_h1 = segment_text(text=text, pattern=r"^# (.+)")
- for h1, h1_text in segments_h1:
- logging.debug('\t- H1: "%s" (%d)', h1, len(h1_text))
- segments_h2 = segment_text(text=h1_text, pattern=r"^## (.+)")
- for h2, content in segments_h2:
- content_chars = len(content)
- content_tokens = len(encoding_function(content))
- logging.debug('\t\t- H2: "%s" (%d)', h2, content_chars)
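- # Each H2 section becomes one collection entry: an id built from the filename
- # and headings, the reconstructed markdown text, and the headings as metadata.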
- id_ = f"{filename} # {h1} ## {h2}" # unique doc id
- document = f"# {h1}\n\n## {h2}\n\n{content.strip()}"
- metadata = {"filename": filename, "h1": h1, "h2": h2}
- assert (
- content_tokens < max_output_tokens
- ), f"Content is too long ({content_tokens}): #{h1} ##{h2}"
- collection.add(ids=id_, documents=document, metadatas=metadata)
- n_tokens += content_tokens
- n_chars += content_chars
- return n_chars, n_tokens
-
-
-def main(args: list[str] | None = None) -> int:
- """Main function of the script."""
- # parsing
- opts = PARSER.parse_args(args)
- # database
- database_path = opts.database
- logging.info("Database path: %s", database_path)
- client = lib.get_database_client(path=database_path)
- logging.info("- Reseting database client: %s", client.reset())
- # encoding
- encoding_function = lib.get_encoding_function()
- logging.info("Encoding function: %s", encoding_function)
- # embedding
- embedding_function = lib.get_embedding_function()
- logging.info("Embedding function: %s", embedding_function)
- # collection
- database_collection = opts.collection
- logging.info("Database collection: %s", database_collection)
- collection = client.create_collection(
- name=database_collection, embedding_function=embedding_function
- )
- # files
- for i, file in enumerate(opts.files):
- logging.info("Importing file %d: %s", i, file.name)
- n_chars, n_tokens = import_file(
- file=file, collection=collection, encoding_function=encoding_function
- )
- logging.info(
- "- Docs imported from file %s: %d chars | %d tokens", i, n_chars, n_tokens
- )
- # return
- return 0
-
-
-# %% ENTRYPOINTS
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/Index-f4230f0b.js b/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/Index-f4230f0b.js
deleted file mode 100644
index 14018a78764c710c5128e8d45655d322a8d0eb1d..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/gradio_pdf/src/backend/gradio_pdf/templates/component/Index-f4230f0b.js
+++ /dev/null
@@ -1,19923 +0,0 @@
-var ni = Object.defineProperty;
-var ii = (t, e, i) => e in t ? ni(t, e, { enumerable: !0, configurable: !0, writable: !0, value: i }) : t[e] = i;
-var nt = (t, e, i) => (ii(t, typeof e != "symbol" ? e + "" : e, i), i), $t = (t, e, i) => {
- if (!e.has(t))
- throw TypeError("Cannot " + i);
-};
-var a = (t, e, i) => ($t(t, e, "read from private field"), i ? i.call(t) : e.get(t)), W = (t, e, i) => {
- if (e.has(t))
- throw TypeError("Cannot add the same private member more than once");
- e instanceof WeakSet ? e.add(t) : e.set(t, i);
-}, oe = (t, e, i, n) => ($t(t, e, "write to private field"), n ? n.call(t, i) : e.set(t, i), i);
-var _t = (t, e, i, n) => ({
- set _(s) {
- oe(t, e, s, i);
- },
- get _() {
- return a(t, e, n);
- }
-}), K = (t, e, i) => ($t(t, e, "access private method"), i);
-const {
- SvelteComponent: SvelteComponent$e,
- append: append$d,
- attr: attr$d,
- detach: detach$e,
- init: init$e,
- insert: insert$e,
- noop: noop$7,
- safe_not_equal: safe_not_equal$f,
- set_style: set_style$6,
- svg_element: svg_element$5
-} = window.__gradio__svelte__internal;
-function create_fragment$e(t) {
- let e, i, n, s;
- return {
- c() {
- e = svg_element$5("svg"), i = svg_element$5("g"), n = svg_element$5("path"), s = svg_element$5("path"), attr$d(n, "d", "M18,6L6.087,17.913"), set_style$6(n, "fill", "none"), set_style$6(n, "fill-rule", "nonzero"), set_style$6(n, "stroke-width", "2px"), attr$d(i, "transform", "matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"), attr$d(s, "d", "M4.364,4.364L19.636,19.636"), set_style$6(s, "fill", "none"), set_style$6(s, "fill-rule", "nonzero"), set_style$6(s, "stroke-width", "2px"), attr$d(e, "width", "100%"), attr$d(e, "height", "100%"), attr$d(e, "viewBox", "0 0 24 24"), attr$d(e, "version", "1.1"), attr$d(e, "xmlns", "http://www.w3.org/2000/svg"), attr$d(e, "xmlns:xlink", "http://www.w3.org/1999/xlink"), attr$d(e, "xml:space", "preserve"), attr$d(e, "stroke", "currentColor"), set_style$6(e, "fill-rule", "evenodd"), set_style$6(e, "clip-rule", "evenodd"), set_style$6(e, "stroke-linecap", "round"), set_style$6(e, "stroke-linejoin", "round");
- },
- m(l, h) {
- insert$e(l, e, h), append$d(e, i), append$d(i, n), append$d(e, s);
- },
- p: noop$7,
- i: noop$7,
- o: noop$7,
- d(l) {
- l && detach$e(e);
- }
- };
-}
-class Clear extends SvelteComponent$e {
- constructor(e) {
- super(), init$e(this, e, null, create_fragment$e, safe_not_equal$f, {});
- }
-}
-const DropdownArrow_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$d,
- append: append$c,
- attr: attr$c,
- detach: detach$d,
- init: init$d,
- insert: insert$d,
- noop: noop$6,
- safe_not_equal: safe_not_equal$e,
- svg_element: svg_element$4
-} = window.__gradio__svelte__internal;
-function create_fragment$d(t) {
- let e, i;
- return {
- c() {
- e = svg_element$4("svg"), i = svg_element$4("path"), attr$c(i, "d", "M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"), attr$c(e, "xmlns", "http://www.w3.org/2000/svg"), attr$c(e, "width", "100%"), attr$c(e, "height", "100%"), attr$c(e, "viewBox", "0 0 24 24"), attr$c(e, "fill", "none"), attr$c(e, "stroke", "currentColor"), attr$c(e, "stroke-width", "1.5"), attr$c(e, "stroke-linecap", "round"), attr$c(e, "stroke-linejoin", "round"), attr$c(e, "class", "feather feather-edit-2");
- },
- m(n, s) {
- insert$d(n, e, s), append$c(e, i);
- },
- p: noop$6,
- i: noop$6,
- o: noop$6,
- d(n) {
- n && detach$d(e);
- }
- };
-}
-class Edit extends SvelteComponent$d {
- constructor(e) {
- super(), init$d(this, e, null, create_fragment$d, safe_not_equal$e, {});
- }
-}
-const {
- SvelteComponent: SvelteComponent$c,
- append: append$b,
- attr: attr$b,
- detach: detach$c,
- init: init$c,
- insert: insert$c,
- noop: noop$5,
- safe_not_equal: safe_not_equal$d,
- svg_element: svg_element$3
-} = window.__gradio__svelte__internal;
-function create_fragment$c(t) {
- let e, i, n;
- return {
- c() {
- e = svg_element$3("svg"), i = svg_element$3("path"), n = svg_element$3("polyline"), attr$b(i, "d", "M13 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V9z"), attr$b(n, "points", "13 2 13 9 20 9"), attr$b(e, "xmlns", "http://www.w3.org/2000/svg"), attr$b(e, "width", "100%"), attr$b(e, "height", "100%"), attr$b(e, "viewBox", "0 0 24 24"), attr$b(e, "fill", "none"), attr$b(e, "stroke", "currentColor"), attr$b(e, "stroke-width", "1.5"), attr$b(e, "stroke-linecap", "round"), attr$b(e, "stroke-linejoin", "round"), attr$b(e, "class", "feather feather-file");
- },
- m(s, l) {
- insert$c(s, e, l), append$b(e, i), append$b(e, n);
- },
- p: noop$5,
- i: noop$5,
- o: noop$5,
- d(s) {
- s && detach$c(e);
- }
- };
-}
-let File$1 = class extends SvelteComponent$c {
- constructor(e) {
- super(), init$c(this, e, null, create_fragment$c, safe_not_equal$d, {});
- }
-};
-const {
- SvelteComponent: SvelteComponent$b,
- append: append$a,
- attr: attr$a,
- detach: detach$b,
- init: init$b,
- insert: insert$b,
- noop: noop$4,
- safe_not_equal: safe_not_equal$c,
- svg_element: svg_element$2
-} = window.__gradio__svelte__internal;
-function create_fragment$b(t) {
- let e, i, n;
- return {
- c() {
- e = svg_element$2("svg"), i = svg_element$2("polyline"), n = svg_element$2("path"), attr$a(i, "points", "1 4 1 10 7 10"), attr$a(n, "d", "M3.51 15a9 9 0 1 0 2.13-9.36L1 10"), attr$a(e, "xmlns", "http://www.w3.org/2000/svg"), attr$a(e, "width", "100%"), attr$a(e, "height", "100%"), attr$a(e, "viewBox", "0 0 24 24"), attr$a(e, "fill", "none"), attr$a(e, "stroke", "currentColor"), attr$a(e, "stroke-width", "2"), attr$a(e, "stroke-linecap", "round"), attr$a(e, "stroke-linejoin", "round"), attr$a(e, "class", "feather feather-rotate-ccw");
- },
- m(s, l) {
- insert$b(s, e, l), append$a(e, i), append$a(e, n);
- },
- p: noop$4,
- i: noop$4,
- o: noop$4,
- d(s) {
- s && detach$b(e);
- }
- };
-}
-class Undo extends SvelteComponent$b {
- constructor(e) {
- super(), init$b(this, e, null, create_fragment$b, safe_not_equal$c, {});
- }
-}
-const {
- SvelteComponent: SvelteComponent$a,
- append: append$9,
- attr: attr$9,
- detach: detach$a,
- init: init$a,
- insert: insert$a,
- noop: noop$3,
- safe_not_equal: safe_not_equal$b,
- svg_element: svg_element$1
-} = window.__gradio__svelte__internal;
-function create_fragment$a(t) {
- let e, i, n, s;
- return {
- c() {
- e = svg_element$1("svg"), i = svg_element$1("path"), n = svg_element$1("polyline"), s = svg_element$1("line"), attr$9(i, "d", "M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"), attr$9(n, "points", "17 8 12 3 7 8"), attr$9(s, "x1", "12"), attr$9(s, "y1", "3"), attr$9(s, "x2", "12"), attr$9(s, "y2", "15"), attr$9(e, "xmlns", "http://www.w3.org/2000/svg"), attr$9(e, "width", "90%"), attr$9(e, "height", "90%"), attr$9(e, "viewBox", "0 0 24 24"), attr$9(e, "fill", "none"), attr$9(e, "stroke", "currentColor"), attr$9(e, "stroke-width", "2"), attr$9(e, "stroke-linecap", "round"), attr$9(e, "stroke-linejoin", "round"), attr$9(e, "class", "feather feather-upload");
- },
- m(l, h) {
- insert$a(l, e, h), append$9(e, i), append$9(e, n), append$9(e, s);
- },
- p: noop$3,
- i: noop$3,
- o: noop$3,
- d(l) {
- l && detach$a(e);
- }
- };
-}
-let Upload$1 = class extends SvelteComponent$a {
- constructor(e) {
- super(), init$a(this, e, null, create_fragment$a, safe_not_equal$b, {});
- }
-};
-const PdfUploadText_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$9,
- append: append$8,
- attr: attr$8,
- create_component: create_component$5,
- destroy_component: destroy_component$5,
- detach: detach$9,
- element: element$9,
- init: init$9,
- insert: insert$9,
- mount_component: mount_component$5,
- safe_not_equal: safe_not_equal$a,
- text: text$4,
- toggle_class: toggle_class$8,
- transition_in: transition_in$8,
- transition_out: transition_out$8
-} = window.__gradio__svelte__internal;
-function create_fragment$9(t) {
- let e, i, n, s, l, h, _;
- return n = new Upload$1({}), {
- c() {
- e = element$9("div"), i = element$9("span"), create_component$5(n.$$.fragment), s = text$4(`
- Drop PDF
- `), l = element$9("span"), l.textContent = "- or -", h = text$4(`
- Click to Upload`), attr$8(i, "class", "icon-wrap svelte-kzcjhc"), toggle_class$8(
- i,
- "hovered",
- /*hovered*/
- t[0]
- ), attr$8(l, "class", "or svelte-kzcjhc"), attr$8(e, "class", "wrap svelte-kzcjhc");
- },
- m(c, o) {
- insert$9(c, e, o), append$8(e, i), mount_component$5(n, i, null), append$8(e, s), append$8(e, l), append$8(e, h), _ = !0;
- },
- p(c, [o]) {
- (!_ || o & /*hovered*/
- 1) && toggle_class$8(
- i,
- "hovered",
- /*hovered*/
- c[0]
- );
- },
- i(c) {
- _ || (transition_in$8(n.$$.fragment, c), _ = !0);
- },
- o(c) {
- transition_out$8(n.$$.fragment, c), _ = !1;
- },
- d(c) {
- c && detach$9(e), destroy_component$5(n);
- }
- };
-}
-function instance$9(t, e, i) {
- let { hovered: n = !1 } = e;
- return t.$$set = (s) => {
- "hovered" in s && i(0, n = s.hovered);
- }, [n];
-}
-class PdfUploadText extends SvelteComponent$9 {
- constructor(e) {
- super(), init$9(this, e, instance$9, create_fragment$9, safe_not_equal$a, { hovered: 0 });
- }
-}
-const Block_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$8,
- assign: assign$1,
- create_slot: create_slot$3,
- detach: detach$8,
- element: element$8,
- get_all_dirty_from_scope: get_all_dirty_from_scope$3,
- get_slot_changes: get_slot_changes$3,
- get_spread_update: get_spread_update$1,
- init: init$8,
- insert: insert$8,
- safe_not_equal: safe_not_equal$9,
- set_dynamic_element_data,
- set_style: set_style$5,
- toggle_class: toggle_class$7,
- transition_in: transition_in$7,
- transition_out: transition_out$7,
- update_slot_base: update_slot_base$3
-} = window.__gradio__svelte__internal;
-function create_dynamic_element(t) {
- let e, i, n;
- const s = (
- /*#slots*/
- t[17].default
- ), l = create_slot$3(
- s,
- t,
- /*$$scope*/
- t[16],
- null
- );
- let h = [
- { "data-testid": (
- /*test_id*/
- t[7]
- ) },
- { id: (
- /*elem_id*/
- t[2]
- ) },
- {
- class: i = "block " + /*elem_classes*/
- t[3].join(" ") + " svelte-1t38q2d"
- }
- ], _ = {};
- for (let c = 0; c < h.length; c += 1)
- _ = assign$1(_, h[c]);
- return {
- c() {
- e = element$8(
- /*tag*/
- t[14]
- ), l && l.c(), set_dynamic_element_data(
- /*tag*/
- t[14]
- )(e, _), toggle_class$7(
- e,
- "hidden",
- /*visible*/
- t[10] === !1
- ), toggle_class$7(
- e,
- "padded",
- /*padding*/
- t[6]
- ), toggle_class$7(
- e,
- "border_focus",
- /*border_mode*/
- t[5] === "focus"
- ), toggle_class$7(e, "hide-container", !/*explicit_call*/
- t[8] && !/*container*/
- t[9]), set_style$5(e, "height", typeof /*height*/
- t[0] == "number" ? (
- /*height*/
- t[0] + "px"
- ) : void 0), set_style$5(e, "width", typeof /*width*/
- t[1] == "number" ? `calc(min(${/*width*/
- t[1]}px, 100%))` : void 0), set_style$5(
- e,
- "border-style",
- /*variant*/
- t[4]
- ), set_style$5(
- e,
- "overflow",
- /*allow_overflow*/
- t[11] ? "visible" : "hidden"
- ), set_style$5(
- e,
- "flex-grow",
- /*scale*/
- t[12]
- ), set_style$5(e, "min-width", `calc(min(${/*min_width*/
- t[13]}px, 100%))`), set_style$5(e, "border-width", "var(--block-border-width)");
- },
- m(c, o) {
- insert$8(c, e, o), l && l.m(e, null), n = !0;
- },
- p(c, o) {
- l && l.p && (!n || o & /*$$scope*/
- 65536) && update_slot_base$3(
- l,
- s,
- c,
- /*$$scope*/
- c[16],
- n ? get_slot_changes$3(
- s,
- /*$$scope*/
- c[16],
- o,
- null
- ) : get_all_dirty_from_scope$3(
- /*$$scope*/
- c[16]
- ),
- null
- ), set_dynamic_element_data(
- /*tag*/
- c[14]
- )(e, _ = get_spread_update$1(h, [
- (!n || o & /*test_id*/
- 128) && { "data-testid": (
- /*test_id*/
- c[7]
- ) },
- (!n || o & /*elem_id*/
- 4) && { id: (
- /*elem_id*/
- c[2]
- ) },
- (!n || o & /*elem_classes*/
- 8 && i !== (i = "block " + /*elem_classes*/
- c[3].join(" ") + " svelte-1t38q2d")) && { class: i }
- ])), toggle_class$7(
- e,
- "hidden",
- /*visible*/
- c[10] === !1
- ), toggle_class$7(
- e,
- "padded",
- /*padding*/
- c[6]
- ), toggle_class$7(
- e,
- "border_focus",
- /*border_mode*/
- c[5] === "focus"
- ), toggle_class$7(e, "hide-container", !/*explicit_call*/
- c[8] && !/*container*/
- c[9]), o & /*height*/
- 1 && set_style$5(e, "height", typeof /*height*/
- c[0] == "number" ? (
- /*height*/
- c[0] + "px"
- ) : void 0), o & /*width*/
- 2 && set_style$5(e, "width", typeof /*width*/
- c[1] == "number" ? `calc(min(${/*width*/
- c[1]}px, 100%))` : void 0), o & /*variant*/
- 16 && set_style$5(
- e,
- "border-style",
- /*variant*/
- c[4]
- ), o & /*allow_overflow*/
- 2048 && set_style$5(
- e,
- "overflow",
- /*allow_overflow*/
- c[11] ? "visible" : "hidden"
- ), o & /*scale*/
- 4096 && set_style$5(
- e,
- "flex-grow",
- /*scale*/
- c[12]
- ), o & /*min_width*/
- 8192 && set_style$5(e, "min-width", `calc(min(${/*min_width*/
- c[13]}px, 100%))`);
- },
- i(c) {
- n || (transition_in$7(l, c), n = !0);
- },
- o(c) {
- transition_out$7(l, c), n = !1;
- },
- d(c) {
- c && detach$8(e), l && l.d(c);
- }
- };
-}
-function create_fragment$8(t) {
- let e, i = (
- /*tag*/
- t[14] && create_dynamic_element(t)
- );
- return {
- c() {
- i && i.c();
- },
- m(n, s) {
- i && i.m(n, s), e = !0;
- },
- p(n, [s]) {
- /*tag*/
- n[14] && i.p(n, s);
- },
- i(n) {
- e || (transition_in$7(i, n), e = !0);
- },
- o(n) {
- transition_out$7(i, n), e = !1;
- },
- d(n) {
- i && i.d(n);
- }
- };
-}
-function instance$8(t, e, i) {
- let { $$slots: n = {}, $$scope: s } = e, { height: l = void 0 } = e, { width: h = void 0 } = e, { elem_id: _ = "" } = e, { elem_classes: c = [] } = e, { variant: o = "solid" } = e, { border_mode: r = "base" } = e, { padding: T = !0 } = e, { type: S = "normal" } = e, { test_id: w = void 0 } = e, { explicit_call: C = !1 } = e, { container: P = !0 } = e, { visible: b = !0 } = e, { allow_overflow: k = !0 } = e, { scale: F = null } = e, { min_width: x = 0 } = e, y = S === "fieldset" ? "fieldset" : "div";
- return t.$$set = (p) => {
- "height" in p && i(0, l = p.height), "width" in p && i(1, h = p.width), "elem_id" in p && i(2, _ = p.elem_id), "elem_classes" in p && i(3, c = p.elem_classes), "variant" in p && i(4, o = p.variant), "border_mode" in p && i(5, r = p.border_mode), "padding" in p && i(6, T = p.padding), "type" in p && i(15, S = p.type), "test_id" in p && i(7, w = p.test_id), "explicit_call" in p && i(8, C = p.explicit_call), "container" in p && i(9, P = p.container), "visible" in p && i(10, b = p.visible), "allow_overflow" in p && i(11, k = p.allow_overflow), "scale" in p && i(12, F = p.scale), "min_width" in p && i(13, x = p.min_width), "$$scope" in p && i(16, s = p.$$scope);
- }, [
- l,
- h,
- _,
- c,
- o,
- r,
- T,
- w,
- C,
- P,
- b,
- k,
- F,
- x,
- y,
- S,
- s,
- n
- ];
-}
-class Block extends SvelteComponent$8 {
- constructor(e) {
- super(), init$8(this, e, instance$8, create_fragment$8, safe_not_equal$9, {
- height: 0,
- width: 1,
- elem_id: 2,
- elem_classes: 3,
- variant: 4,
- border_mode: 5,
- padding: 6,
- type: 15,
- test_id: 7,
- explicit_call: 8,
- container: 9,
- visible: 10,
- allow_overflow: 11,
- scale: 12,
- min_width: 13
- });
- }
-}
-const Info_svelte_svelte_type_style_lang = "", BlockTitle_svelte_svelte_type_style_lang = "", BlockLabel_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$7,
- append: append$7,
- attr: attr$7,
- create_component: create_component$4,
- destroy_component: destroy_component$4,
- detach: detach$7,
- element: element$7,
- init: init$7,
- insert: insert$7,
- mount_component: mount_component$4,
- safe_not_equal: safe_not_equal$8,
- set_data: set_data$3,
- space: space$6,
- text: text$3,
- toggle_class: toggle_class$6,
- transition_in: transition_in$6,
- transition_out: transition_out$6
-} = window.__gradio__svelte__internal;
-function create_fragment$7(t) {
- let e, i, n, s, l, h;
- return n = new /*Icon*/
- t[1]({}), {
- c() {
- e = element$7("label"), i = element$7("span"), create_component$4(n.$$.fragment), s = space$6(), l = text$3(
- /*label*/
- t[0]
- ), attr$7(i, "class", "svelte-9gxdi0"), attr$7(e, "for", ""), attr$7(e, "data-testid", "block-label"), attr$7(e, "class", "svelte-9gxdi0"), toggle_class$6(e, "hide", !/*show_label*/
- t[2]), toggle_class$6(e, "sr-only", !/*show_label*/
- t[2]), toggle_class$6(
- e,
- "float",
- /*float*/
- t[4]
- ), toggle_class$6(
- e,
- "hide-label",
- /*disable*/
- t[3]
- );
- },
- m(_, c) {
- insert$7(_, e, c), append$7(e, i), mount_component$4(n, i, null), append$7(e, s), append$7(e, l), h = !0;
- },
- p(_, [c]) {
- (!h || c & /*label*/
- 1) && set_data$3(
- l,
- /*label*/
- _[0]
- ), (!h || c & /*show_label*/
- 4) && toggle_class$6(e, "hide", !/*show_label*/
- _[2]), (!h || c & /*show_label*/
- 4) && toggle_class$6(e, "sr-only", !/*show_label*/
- _[2]), (!h || c & /*float*/
- 16) && toggle_class$6(
- e,
- "float",
- /*float*/
- _[4]
- ), (!h || c & /*disable*/
- 8) && toggle_class$6(
- e,
- "hide-label",
- /*disable*/
- _[3]
- );
- },
- i(_) {
- h || (transition_in$6(n.$$.fragment, _), h = !0);
- },
- o(_) {
- transition_out$6(n.$$.fragment, _), h = !1;
- },
- d(_) {
- _ && detach$7(e), destroy_component$4(n);
- }
- };
-}
-function instance$7(t, e, i) {
- let { label: n = null } = e, { Icon: s } = e, { show_label: l = !0 } = e, { disable: h = !1 } = e, { float: _ = !0 } = e;
- return t.$$set = (c) => {
- "label" in c && i(0, n = c.label), "Icon" in c && i(1, s = c.Icon), "show_label" in c && i(2, l = c.show_label), "disable" in c && i(3, h = c.disable), "float" in c && i(4, _ = c.float);
- }, [n, s, l, h, _];
-}
-class BlockLabel extends SvelteComponent$7 {
- constructor(e) {
- super(), init$7(this, e, instance$7, create_fragment$7, safe_not_equal$8, {
- label: 0,
- Icon: 1,
- show_label: 2,
- disable: 3,
- float: 4
- });
- }
-}
-const IconButton_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$6,
- append: append$6,
- attr: attr$6,
- bubble: bubble$2,
- create_component: create_component$3,
- destroy_component: destroy_component$3,
- detach: detach$6,
- element: element$6,
- init: init$6,
- insert: insert$6,
- listen: listen$2,
- mount_component: mount_component$3,
- safe_not_equal: safe_not_equal$7,
- set_data: set_data$2,
- space: space$5,
- text: text$2,
- toggle_class: toggle_class$5,
- transition_in: transition_in$5,
- transition_out: transition_out$5
-} = window.__gradio__svelte__internal;
-function create_if_block$4(t) {
- let e, i;
- return {
- c() {
- e = element$6("span"), i = text$2(
- /*label*/
- t[1]
- ), attr$6(e, "class", "svelte-xtz2g8");
- },
- m(n, s) {
- insert$6(n, e, s), append$6(e, i);
- },
- p(n, s) {
- s & /*label*/
- 2 && set_data$2(
- i,
- /*label*/
- n[1]
- );
- },
- d(n) {
- n && detach$6(e);
- }
- };
-}
-function create_fragment$6(t) {
- let e, i, n, s, l, h, _, c = (
- /*show_label*/
- t[2] && create_if_block$4(t)
- );
- return s = new /*Icon*/
- t[0]({}), {
- c() {
- e = element$6("button"), c && c.c(), i = space$5(), n = element$6("div"), create_component$3(s.$$.fragment), attr$6(n, "class", "svelte-xtz2g8"), toggle_class$5(
- n,
- "small",
- /*size*/
- t[4] === "small"
- ), toggle_class$5(
- n,
- "large",
- /*size*/
- t[4] === "large"
- ), attr$6(
- e,
- "aria-label",
- /*label*/
- t[1]
- ), attr$6(
- e,
- "title",
- /*label*/
- t[1]
- ), attr$6(e, "class", "svelte-xtz2g8"), toggle_class$5(
- e,
- "pending",
- /*pending*/
- t[3]
- ), toggle_class$5(
- e,
- "padded",
- /*padded*/
- t[5]
- );
- },
- m(o, r) {
- insert$6(o, e, r), c && c.m(e, null), append$6(e, i), append$6(e, n), mount_component$3(s, n, null), l = !0, h || (_ = listen$2(
- e,
- "click",
- /*click_handler*/
- t[6]
- ), h = !0);
- },
- p(o, [r]) {
- /*show_label*/
- o[2] ? c ? c.p(o, r) : (c = create_if_block$4(o), c.c(), c.m(e, i)) : c && (c.d(1), c = null), (!l || r & /*size*/
- 16) && toggle_class$5(
- n,
- "small",
- /*size*/
- o[4] === "small"
- ), (!l || r & /*size*/
- 16) && toggle_class$5(
- n,
- "large",
- /*size*/
- o[4] === "large"
- ), (!l || r & /*label*/
- 2) && attr$6(
- e,
- "aria-label",
- /*label*/
- o[1]
- ), (!l || r & /*label*/
- 2) && attr$6(
- e,
- "title",
- /*label*/
- o[1]
- ), (!l || r & /*pending*/
- 8) && toggle_class$5(
- e,
- "pending",
- /*pending*/
- o[3]
- ), (!l || r & /*padded*/
- 32) && toggle_class$5(
- e,
- "padded",
- /*padded*/
- o[5]
- );
- },
- i(o) {
- l || (transition_in$5(s.$$.fragment, o), l = !0);
- },
- o(o) {
- transition_out$5(s.$$.fragment, o), l = !1;
- },
- d(o) {
- o && detach$6(e), c && c.d(), destroy_component$3(s), h = !1, _();
- }
- };
-}
-function instance$6(t, e, i) {
- let { Icon: n } = e, { label: s = "" } = e, { show_label: l = !1 } = e, { pending: h = !1 } = e, { size: _ = "small" } = e, { padded: c = !0 } = e;
- function o(r) {
- bubble$2.call(this, t, r);
- }
- return t.$$set = (r) => {
- "Icon" in r && i(0, n = r.Icon), "label" in r && i(1, s = r.label), "show_label" in r && i(2, l = r.show_label), "pending" in r && i(3, h = r.pending), "size" in r && i(4, _ = r.size), "padded" in r && i(5, c = r.padded);
- }, [n, s, l, h, _, c, o];
-}
-class IconButton extends SvelteComponent$6 {
- constructor(e) {
- super(), init$6(this, e, instance$6, create_fragment$6, safe_not_equal$7, {
- Icon: 0,
- label: 1,
- show_label: 2,
- pending: 3,
- size: 4,
- padded: 5
- });
- }
-}
-const Empty_svelte_svelte_type_style_lang = "", color_values = [
- { color: "red", primary: 600, secondary: 100 },
- { color: "green", primary: 600, secondary: 100 },
- { color: "blue", primary: 600, secondary: 100 },
- { color: "yellow", primary: 500, secondary: 100 },
- { color: "purple", primary: 600, secondary: 100 },
- { color: "teal", primary: 600, secondary: 100 },
- { color: "orange", primary: 600, secondary: 100 },
- { color: "cyan", primary: 600, secondary: 100 },
- { color: "lime", primary: 500, secondary: 100 },
- { color: "pink", primary: 600, secondary: 100 }
-], tw_colors = {
- inherit: "inherit",
- current: "currentColor",
- transparent: "transparent",
- black: "#000",
- white: "#fff",
- slate: {
- 50: "#f8fafc",
- 100: "#f1f5f9",
- 200: "#e2e8f0",
- 300: "#cbd5e1",
- 400: "#94a3b8",
- 500: "#64748b",
- 600: "#475569",
- 700: "#334155",
- 800: "#1e293b",
- 900: "#0f172a",
- 950: "#020617"
- },
- gray: {
- 50: "#f9fafb",
- 100: "#f3f4f6",
- 200: "#e5e7eb",
- 300: "#d1d5db",
- 400: "#9ca3af",
- 500: "#6b7280",
- 600: "#4b5563",
- 700: "#374151",
- 800: "#1f2937",
- 900: "#111827",
- 950: "#030712"
- },
- zinc: {
- 50: "#fafafa",
- 100: "#f4f4f5",
- 200: "#e4e4e7",
- 300: "#d4d4d8",
- 400: "#a1a1aa",
- 500: "#71717a",
- 600: "#52525b",
- 700: "#3f3f46",
- 800: "#27272a",
- 900: "#18181b",
- 950: "#09090b"
- },
- neutral: {
- 50: "#fafafa",
- 100: "#f5f5f5",
- 200: "#e5e5e5",
- 300: "#d4d4d4",
- 400: "#a3a3a3",
- 500: "#737373",
- 600: "#525252",
- 700: "#404040",
- 800: "#262626",
- 900: "#171717",
- 950: "#0a0a0a"
- },
- stone: {
- 50: "#fafaf9",
- 100: "#f5f5f4",
- 200: "#e7e5e4",
- 300: "#d6d3d1",
- 400: "#a8a29e",
- 500: "#78716c",
- 600: "#57534e",
- 700: "#44403c",
- 800: "#292524",
- 900: "#1c1917",
- 950: "#0c0a09"
- },
- red: {
- 50: "#fef2f2",
- 100: "#fee2e2",
- 200: "#fecaca",
- 300: "#fca5a5",
- 400: "#f87171",
- 500: "#ef4444",
- 600: "#dc2626",
- 700: "#b91c1c",
- 800: "#991b1b",
- 900: "#7f1d1d",
- 950: "#450a0a"
- },
- orange: {
- 50: "#fff7ed",
- 100: "#ffedd5",
- 200: "#fed7aa",
- 300: "#fdba74",
- 400: "#fb923c",
- 500: "#f97316",
- 600: "#ea580c",
- 700: "#c2410c",
- 800: "#9a3412",
- 900: "#7c2d12",
- 950: "#431407"
- },
- amber: {
- 50: "#fffbeb",
- 100: "#fef3c7",
- 200: "#fde68a",
- 300: "#fcd34d",
- 400: "#fbbf24",
- 500: "#f59e0b",
- 600: "#d97706",
- 700: "#b45309",
- 800: "#92400e",
- 900: "#78350f",
- 950: "#451a03"
- },
- yellow: {
- 50: "#fefce8",
- 100: "#fef9c3",
- 200: "#fef08a",
- 300: "#fde047",
- 400: "#facc15",
- 500: "#eab308",
- 600: "#ca8a04",
- 700: "#a16207",
- 800: "#854d0e",
- 900: "#713f12",
- 950: "#422006"
- },
- lime: {
- 50: "#f7fee7",
- 100: "#ecfccb",
- 200: "#d9f99d",
- 300: "#bef264",
- 400: "#a3e635",
- 500: "#84cc16",
- 600: "#65a30d",
- 700: "#4d7c0f",
- 800: "#3f6212",
- 900: "#365314",
- 950: "#1a2e05"
- },
- green: {
- 50: "#f0fdf4",
- 100: "#dcfce7",
- 200: "#bbf7d0",
- 300: "#86efac",
- 400: "#4ade80",
- 500: "#22c55e",
- 600: "#16a34a",
- 700: "#15803d",
- 800: "#166534",
- 900: "#14532d",
- 950: "#052e16"
- },
- emerald: {
- 50: "#ecfdf5",
- 100: "#d1fae5",
- 200: "#a7f3d0",
- 300: "#6ee7b7",
- 400: "#34d399",
- 500: "#10b981",
- 600: "#059669",
- 700: "#047857",
- 800: "#065f46",
- 900: "#064e3b",
- 950: "#022c22"
- },
- teal: {
- 50: "#f0fdfa",
- 100: "#ccfbf1",
- 200: "#99f6e4",
- 300: "#5eead4",
- 400: "#2dd4bf",
- 500: "#14b8a6",
- 600: "#0d9488",
- 700: "#0f766e",
- 800: "#115e59",
- 900: "#134e4a",
- 950: "#042f2e"
- },
- cyan: {
- 50: "#ecfeff",
- 100: "#cffafe",
- 200: "#a5f3fc",
- 300: "#67e8f9",
- 400: "#22d3ee",
- 500: "#06b6d4",
- 600: "#0891b2",
- 700: "#0e7490",
- 800: "#155e75",
- 900: "#164e63",
- 950: "#083344"
- },
- sky: {
- 50: "#f0f9ff",
- 100: "#e0f2fe",
- 200: "#bae6fd",
- 300: "#7dd3fc",
- 400: "#38bdf8",
- 500: "#0ea5e9",
- 600: "#0284c7",
- 700: "#0369a1",
- 800: "#075985",
- 900: "#0c4a6e",
- 950: "#082f49"
- },
- blue: {
- 50: "#eff6ff",
- 100: "#dbeafe",
- 200: "#bfdbfe",
- 300: "#93c5fd",
- 400: "#60a5fa",
- 500: "#3b82f6",
- 600: "#2563eb",
- 700: "#1d4ed8",
- 800: "#1e40af",
- 900: "#1e3a8a",
- 950: "#172554"
- },
- indigo: {
- 50: "#eef2ff",
- 100: "#e0e7ff",
- 200: "#c7d2fe",
- 300: "#a5b4fc",
- 400: "#818cf8",
- 500: "#6366f1",
- 600: "#4f46e5",
- 700: "#4338ca",
- 800: "#3730a3",
- 900: "#312e81",
- 950: "#1e1b4b"
- },
- violet: {
- 50: "#f5f3ff",
- 100: "#ede9fe",
- 200: "#ddd6fe",
- 300: "#c4b5fd",
- 400: "#a78bfa",
- 500: "#8b5cf6",
- 600: "#7c3aed",
- 700: "#6d28d9",
- 800: "#5b21b6",
- 900: "#4c1d95",
- 950: "#2e1065"
- },
- purple: {
- 50: "#faf5ff",
- 100: "#f3e8ff",
- 200: "#e9d5ff",
- 300: "#d8b4fe",
- 400: "#c084fc",
- 500: "#a855f7",
- 600: "#9333ea",
- 700: "#7e22ce",
- 800: "#6b21a8",
- 900: "#581c87",
- 950: "#3b0764"
- },
- fuchsia: {
- 50: "#fdf4ff",
- 100: "#fae8ff",
- 200: "#f5d0fe",
- 300: "#f0abfc",
- 400: "#e879f9",
- 500: "#d946ef",
- 600: "#c026d3",
- 700: "#a21caf",
- 800: "#86198f",
- 900: "#701a75",
- 950: "#4a044e"
- },
- pink: {
- 50: "#fdf2f8",
- 100: "#fce7f3",
- 200: "#fbcfe8",
- 300: "#f9a8d4",
- 400: "#f472b6",
- 500: "#ec4899",
- 600: "#db2777",
- 700: "#be185d",
- 800: "#9d174d",
- 900: "#831843",
- 950: "#500724"
- },
- rose: {
- 50: "#fff1f2",
- 100: "#ffe4e6",
- 200: "#fecdd3",
- 300: "#fda4af",
- 400: "#fb7185",
- 500: "#f43f5e",
- 600: "#e11d48",
- 700: "#be123c",
- 800: "#9f1239",
- 900: "#881337",
- 950: "#4c0519"
- }
-};
-color_values.reduce(
- (t, { color: e, primary: i, secondary: n }) => ({
- ...t,
- [e]: {
- primary: tw_colors[e][i],
- secondary: tw_colors[e][n]
- }
- }),
- {}
-);
-const UploadText_svelte_svelte_type_style_lang = "", Toolbar_svelte_svelte_type_style_lang = "";
-new Intl.Collator(0, { numeric: 1 }).compare;
-function is_url$1(t) {
- try {
- const e = new URL(t);
- return e.protocol === "http:" || e.protocol === "https:";
- } catch {
- return !1;
- }
-}
-function get_fetchable_url_or_file$1(t, e, i) {
- return t == null ? i ? `/proxy=${i}file=` : `${e}/file=` : is_url$1(t) ? t : i ? `/proxy=${i}file=${t}` : `${e}/file=${t}`;
-}
-const Button_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$5,
- append: append$5,
- attr: attr$5,
- bubble: bubble$1,
- check_outros: check_outros$3,
- create_slot: create_slot$2,
- detach: detach$5,
- element: element$5,
- empty: empty$2,
- get_all_dirty_from_scope: get_all_dirty_from_scope$2,
- get_slot_changes: get_slot_changes$2,
- group_outros: group_outros$3,
- init: init$5,
- insert: insert$5,
- listen: listen$1,
- safe_not_equal: safe_not_equal$6,
- set_style: set_style$4,
- space: space$4,
- src_url_equal,
- toggle_class: toggle_class$4,
- transition_in: transition_in$4,
- transition_out: transition_out$4,
- update_slot_base: update_slot_base$2
-} = window.__gradio__svelte__internal;
-function create_else_block$2(t) {
- let e, i, n, s, l, h, _ = (
- /*icon*/
- t[7] && create_if_block_2$1(t)
- );
- const c = (
- /*#slots*/
- t[15].default
- ), o = create_slot$2(
- c,
- t,
- /*$$scope*/
- t[14],
- null
- );
- return {
- c() {
- e = element$5("button"), _ && _.c(), i = space$4(), o && o.c(), attr$5(e, "class", n = /*size*/
- t[4] + " " + /*variant*/
- t[3] + " " + /*elem_classes*/
- t[1].join(" ") + " svelte-8huxfn"), attr$5(
- e,
- "id",
- /*elem_id*/
- t[0]
- ), e.disabled = /*disabled*/
- t[8], toggle_class$4(e, "hidden", !/*visible*/
- t[2]), set_style$4(
- e,
- "flex-grow",
- /*scale*/
- t[9]
- ), set_style$4(
- e,
- "width",
- /*scale*/
- t[9] === 0 ? "fit-content" : null
- ), set_style$4(e, "min-width", typeof /*min_width*/
- t[10] == "number" ? `calc(min(${/*min_width*/
- t[10]}px, 100%))` : null);
- },
- m(r, T) {
- insert$5(r, e, T), _ && _.m(e, null), append$5(e, i), o && o.m(e, null), s = !0, l || (h = listen$1(
- e,
- "click",
- /*click_handler*/
- t[16]
- ), l = !0);
- },
- p(r, T) {
- /*icon*/
- r[7] ? _ ? _.p(r, T) : (_ = create_if_block_2$1(r), _.c(), _.m(e, i)) : _ && (_.d(1), _ = null), o && o.p && (!s || T & /*$$scope*/
- 16384) && update_slot_base$2(
- o,
- c,
- r,
- /*$$scope*/
- r[14],
- s ? get_slot_changes$2(
- c,
- /*$$scope*/
- r[14],
- T,
- null
- ) : get_all_dirty_from_scope$2(
- /*$$scope*/
- r[14]
- ),
- null
- ), (!s || T & /*size, variant, elem_classes*/
- 26 && n !== (n = /*size*/
- r[4] + " " + /*variant*/
- r[3] + " " + /*elem_classes*/
- r[1].join(" ") + " svelte-8huxfn")) && attr$5(e, "class", n), (!s || T & /*elem_id*/
- 1) && attr$5(
- e,
- "id",
- /*elem_id*/
- r[0]
- ), (!s || T & /*disabled*/
- 256) && (e.disabled = /*disabled*/
- r[8]), (!s || T & /*size, variant, elem_classes, visible*/
- 30) && toggle_class$4(e, "hidden", !/*visible*/
- r[2]), T & /*scale*/
- 512 && set_style$4(
- e,
- "flex-grow",
- /*scale*/
- r[9]
- ), T & /*scale*/
- 512 && set_style$4(
- e,
- "width",
- /*scale*/
- r[9] === 0 ? "fit-content" : null
- ), T & /*min_width*/
- 1024 && set_style$4(e, "min-width", typeof /*min_width*/
- r[10] == "number" ? `calc(min(${/*min_width*/
- r[10]}px, 100%))` : null);
- },
- i(r) {
- s || (transition_in$4(o, r), s = !0);
- },
- o(r) {
- transition_out$4(o, r), s = !1;
- },
- d(r) {
- r && detach$5(e), _ && _.d(), o && o.d(r), l = !1, h();
- }
- };
-}
-function create_if_block$3(t) {
- let e, i, n, s, l = (
- /*icon*/
- t[7] && create_if_block_1$3(t)
- );
- const h = (
- /*#slots*/
- t[15].default
- ), _ = create_slot$2(
- h,
- t,
- /*$$scope*/
- t[14],
- null
- );
- return {
- c() {
- e = element$5("a"), l && l.c(), i = space$4(), _ && _.c(), attr$5(
- e,
- "href",
- /*link*/
- t[6]
- ), attr$5(e, "rel", "noopener noreferrer"), attr$5(
- e,
- "aria-disabled",
- /*disabled*/
- t[8]
- ), attr$5(e, "class", n = /*size*/
- t[4] + " " + /*variant*/
- t[3] + " " + /*elem_classes*/
- t[1].join(" ") + " svelte-8huxfn"), attr$5(
- e,
- "id",
- /*elem_id*/
- t[0]
- ), toggle_class$4(e, "hidden", !/*visible*/
- t[2]), toggle_class$4(
- e,
- "disabled",
- /*disabled*/
- t[8]
- ), set_style$4(
- e,
- "flex-grow",
- /*scale*/
- t[9]
- ), set_style$4(
- e,
- "pointer-events",
- /*disabled*/
- t[8] ? "none" : null
- ), set_style$4(
- e,
- "width",
- /*scale*/
- t[9] === 0 ? "fit-content" : null
- ), set_style$4(e, "min-width", typeof /*min_width*/
- t[10] == "number" ? `calc(min(${/*min_width*/
- t[10]}px, 100%))` : null);
- },
- m(c, o) {
- insert$5(c, e, o), l && l.m(e, null), append$5(e, i), _ && _.m(e, null), s = !0;
- },
- p(c, o) {
- /*icon*/
- c[7] ? l ? l.p(c, o) : (l = create_if_block_1$3(c), l.c(), l.m(e, i)) : l && (l.d(1), l = null), _ && _.p && (!s || o & /*$$scope*/
- 16384) && update_slot_base$2(
- _,
- h,
- c,
- /*$$scope*/
- c[14],
- s ? get_slot_changes$2(
- h,
- /*$$scope*/
- c[14],
- o,
- null
- ) : get_all_dirty_from_scope$2(
- /*$$scope*/
- c[14]
- ),
- null
- ), (!s || o & /*link*/
- 64) && attr$5(
- e,
- "href",
- /*link*/
- c[6]
- ), (!s || o & /*disabled*/
- 256) && attr$5(
- e,
- "aria-disabled",
- /*disabled*/
- c[8]
- ), (!s || o & /*size, variant, elem_classes*/
- 26 && n !== (n = /*size*/
- c[4] + " " + /*variant*/
- c[3] + " " + /*elem_classes*/
- c[1].join(" ") + " svelte-8huxfn")) && attr$5(e, "class", n), (!s || o & /*elem_id*/
- 1) && attr$5(
- e,
- "id",
- /*elem_id*/
- c[0]
- ), (!s || o & /*size, variant, elem_classes, visible*/
- 30) && toggle_class$4(e, "hidden", !/*visible*/
- c[2]), (!s || o & /*size, variant, elem_classes, disabled*/
- 282) && toggle_class$4(
- e,
- "disabled",
- /*disabled*/
- c[8]
- ), o & /*scale*/
- 512 && set_style$4(
- e,
- "flex-grow",
- /*scale*/
- c[9]
- ), o & /*disabled*/
- 256 && set_style$4(
- e,
- "pointer-events",
- /*disabled*/
- c[8] ? "none" : null
- ), o & /*scale*/
- 512 && set_style$4(
- e,
- "width",
- /*scale*/
- c[9] === 0 ? "fit-content" : null
- ), o & /*min_width*/
- 1024 && set_style$4(e, "min-width", typeof /*min_width*/
- c[10] == "number" ? `calc(min(${/*min_width*/
- c[10]}px, 100%))` : null);
- },
- i(c) {
- s || (transition_in$4(_, c), s = !0);
- },
- o(c) {
- transition_out$4(_, c), s = !1;
- },
- d(c) {
- c && detach$5(e), l && l.d(), _ && _.d(c);
- }
- };
-}
-function create_if_block_2$1(t) {
- let e, i, n;
- return {
- c() {
- e = element$5("img"), attr$5(e, "class", "button-icon svelte-8huxfn"), src_url_equal(e.src, i = /*icon_path*/
- t[11]) || attr$5(e, "src", i), attr$5(e, "alt", n = `${/*value*/
- t[5]} icon`);
- },
- m(s, l) {
- insert$5(s, e, l);
- },
- p(s, l) {
- l & /*icon_path*/
- 2048 && !src_url_equal(e.src, i = /*icon_path*/
- s[11]) && attr$5(e, "src", i), l & /*value*/
- 32 && n !== (n = `${/*value*/
- s[5]} icon`) && attr$5(e, "alt", n);
- },
- d(s) {
- s && detach$5(e);
- }
- };
-}
-function create_if_block_1$3(t) {
- let e, i, n;
- return {
- c() {
- e = element$5("img"), attr$5(e, "class", "button-icon svelte-8huxfn"), src_url_equal(e.src, i = /*icon_path*/
- t[11]) || attr$5(e, "src", i), attr$5(e, "alt", n = `${/*value*/
- t[5]} icon`);
- },
- m(s, l) {
- insert$5(s, e, l);
- },
- p(s, l) {
- l & /*icon_path*/
- 2048 && !src_url_equal(e.src, i = /*icon_path*/
- s[11]) && attr$5(e, "src", i), l & /*value*/
- 32 && n !== (n = `${/*value*/
- s[5]} icon`) && attr$5(e, "alt", n);
- },
- d(s) {
- s && detach$5(e);
- }
- };
-}
-function create_fragment$5(t) {
- let e, i, n, s;
- const l = [create_if_block$3, create_else_block$2], h = [];
- function _(c, o) {
- return (
- /*link*/
- c[6] && /*link*/
- c[6].length > 0 ? 0 : 1
- );
- }
- return e = _(t), i = h[e] = l[e](t), {
- c() {
- i.c(), n = empty$2();
- },
- m(c, o) {
- h[e].m(c, o), insert$5(c, n, o), s = !0;
- },
- p(c, [o]) {
- let r = e;
- e = _(c), e === r ? h[e].p(c, o) : (group_outros$3(), transition_out$4(h[r], 1, 1, () => {
- h[r] = null;
- }), check_outros$3(), i = h[e], i ? i.p(c, o) : (i = h[e] = l[e](c), i.c()), transition_in$4(i, 1), i.m(n.parentNode, n));
- },
- i(c) {
- s || (transition_in$4(i), s = !0);
- },
- o(c) {
- transition_out$4(i), s = !1;
- },
- d(c) {
- c && detach$5(n), h[e].d(c);
- }
- };
-}
-function instance$5(t, e, i) {
- let n, { $$slots: s = {}, $$scope: l } = e, { elem_id: h = "" } = e, { elem_classes: _ = [] } = e, { visible: c = !0 } = e, { variant: o = "secondary" } = e, { size: r = "lg" } = e, { value: T = null } = e, { link: S = null } = e, { icon: w = null } = e, { disabled: C = !1 } = e, { scale: P = null } = e, { min_width: b = void 0 } = e, { root: k = "" } = e, { proxy_url: F = null } = e;
- function x(y) {
- bubble$1.call(this, t, y);
- }
- return t.$$set = (y) => {
- "elem_id" in y && i(0, h = y.elem_id), "elem_classes" in y && i(1, _ = y.elem_classes), "visible" in y && i(2, c = y.visible), "variant" in y && i(3, o = y.variant), "size" in y && i(4, r = y.size), "value" in y && i(5, T = y.value), "link" in y && i(6, S = y.link), "icon" in y && i(7, w = y.icon), "disabled" in y && i(8, C = y.disabled), "scale" in y && i(9, P = y.scale), "min_width" in y && i(10, b = y.min_width), "root" in y && i(12, k = y.root), "proxy_url" in y && i(13, F = y.proxy_url), "$$scope" in y && i(14, l = y.$$scope);
- }, t.$$.update = () => {
- t.$$.dirty & /*icon, root, proxy_url*/
- 12416 && i(11, n = get_fetchable_url_or_file$1(w, k, F));
- }, [
- h,
- _,
- c,
- o,
- r,
- T,
- S,
- w,
- C,
- P,
- b,
- n,
- k,
- F,
- l,
- s,
- x
- ];
-}
-class Button extends SvelteComponent$5 {
- constructor(e) {
- super(), init$5(this, e, instance$5, create_fragment$5, safe_not_equal$6, {
- elem_id: 0,
- elem_classes: 1,
- visible: 2,
- variant: 3,
- size: 4,
- value: 5,
- link: 6,
- icon: 7,
- disabled: 8,
- scale: 9,
- min_width: 10,
- root: 12,
- proxy_url: 13
- });
- }
-}
-function pretty_si(t) {
- let e = ["", "k", "M", "G", "T", "P", "E", "Z"], i = 0;
- for (; t > 1e3 && i < e.length - 1; )
- t /= 1e3, i++;
- let n = e[i];
- return (Number.isInteger(t) ? t : t.toFixed(1)) + n;
-}
-function noop$2() {
-}
-function run(t) {
- return t();
-}
-function run_all$1(t) {
- t.forEach(run);
-}
-function is_function(t) {
- return typeof t == "function";
-}
-function safe_not_equal$5(t, e) {
- return t != t ? e == e : t !== e || t && typeof t == "object" || typeof t == "function";
-}
-function subscribe(t, ...e) {
- if (t == null) {
- for (const n of e)
- n(void 0);
- return noop$2;
- }
- const i = t.subscribe(...e);
- return i.unsubscribe ? () => i.unsubscribe() : i;
-}
-const is_client = typeof window < "u";
-let now = is_client ? () => window.performance.now() : () => Date.now(), raf = is_client ? (t) => requestAnimationFrame(t) : noop$2;
-const tasks = /* @__PURE__ */ new Set();
-function run_tasks(t) {
- tasks.forEach((e) => {
- e.c(t) || (tasks.delete(e), e.f());
- }), tasks.size !== 0 && raf(run_tasks);
-}
-function loop(t) {
- let e;
- return tasks.size === 0 && raf(run_tasks), {
- promise: new Promise((i) => {
- tasks.add(e = { c: t, f: i });
- }),
- abort() {
- tasks.delete(e);
- }
- };
-}
-const subscriber_queue = [];
-function readable(t, e) {
- return {
- subscribe: writable(t, e).subscribe
- };
-}
-function writable(t, e = noop$2) {
- let i;
- const n = /* @__PURE__ */ new Set();
- function s(_) {
- if (safe_not_equal$5(t, _) && (t = _, i)) {
- const c = !subscriber_queue.length;
- for (const o of n)
- o[1](), subscriber_queue.push(o, t);
- if (c) {
- for (let o = 0; o < subscriber_queue.length; o += 2)
- subscriber_queue[o][0](subscriber_queue[o + 1]);
- subscriber_queue.length = 0;
- }
- }
- }
- function l(_) {
- s(_(t));
- }
- function h(_, c = noop$2) {
- const o = [_, c];
- return n.add(o), n.size === 1 && (i = e(s, l) || noop$2), _(t), () => {
- n.delete(o), n.size === 0 && i && (i(), i = null);
- };
- }
- return { set: s, update: l, subscribe: h };
-}
-function derived(t, e, i) {
- const n = !Array.isArray(t), s = n ? [t] : t;
- if (!s.every(Boolean))
- throw new Error("derived() expects stores as input, got a falsy value");
- const l = e.length < 2;
- return readable(i, (h, _) => {
- let c = !1;
- const o = [];
- let r = 0, T = noop$2;
- const S = () => {
- if (r)
- return;
- T();
- const C = e(n ? o[0] : o, h, _);
- l ? h(C) : T = is_function(C) ? C : noop$2;
- }, w = s.map(
- (C, P) => subscribe(
- C,
- (b) => {
- o[P] = b, r &= ~(1 << P), c && S();
- },
- () => {
- r |= 1 << P;
- }
- )
- );
- return c = !0, S(), function() {
- run_all$1(w), T(), c = !1;
- };
- });
-}
-function is_date(t) {
- return Object.prototype.toString.call(t) === "[object Date]";
-}
-function tick_spring(t, e, i, n) {
- if (typeof i == "number" || is_date(i)) {
- const s = n - i, l = (i - e) / (t.dt || 1 / 60), h = t.opts.stiffness * s, _ = t.opts.damping * l, c = (h - _) * t.inv_mass, o = (l + c) * t.dt;
- return Math.abs(o) < t.opts.precision && Math.abs(s) < t.opts.precision ? n : (t.settled = !1, is_date(i) ? new Date(i.getTime() + o) : i + o);
- } else {
- if (Array.isArray(i))
- return i.map(
- (s, l) => tick_spring(t, e[l], i[l], n[l])
- );
- if (typeof i == "object") {
- const s = {};
- for (const l in i)
- s[l] = tick_spring(t, e[l], i[l], n[l]);
- return s;
- } else
- throw new Error(`Cannot spring ${typeof i} values`);
- }
-}
-function spring(t, e = {}) {
- const i = writable(t), { stiffness: n = 0.15, damping: s = 0.8, precision: l = 0.01 } = e;
- let h, _, c, o = t, r = t, T = 1, S = 0, w = !1;
- function C(b, k = {}) {
- r = b;
- const F = c = {};
- return t == null || k.hard || P.stiffness >= 1 && P.damping >= 1 ? (w = !0, h = now(), o = b, i.set(t = r), Promise.resolve()) : (k.soft && (S = 1 / ((k.soft === !0 ? 0.5 : +k.soft) * 60), T = 0), _ || (h = now(), w = !1, _ = loop((x) => {
- if (w)
- return w = !1, _ = null, !1;
- T = Math.min(T + S, 1);
- const y = {
- inv_mass: T,
- opts: P,
- settled: !0,
- dt: (x - h) * 60 / 1e3
- }, p = tick_spring(y, o, t, r);
- return h = x, o = t, i.set(t = p), y.settled && (_ = null), !y.settled;
- })), new Promise((x) => {
- _.promise.then(() => {
- F === c && x();
- });
- }));
- }
- const P = {
- set: C,
- update: (b, k) => C(b(r, t), k),
- subscribe: i.subscribe,
- stiffness: n,
- damping: s,
- precision: l
- };
- return P;
-}
-const Loader_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$4,
- append: append$4,
- attr: attr$4,
- component_subscribe,
- detach: detach$4,
- element: element$4,
- init: init$4,
- insert: insert$4,
- noop: noop$1,
- safe_not_equal: safe_not_equal$4,
- set_style: set_style$3,
- svg_element,
- toggle_class: toggle_class$3
-} = window.__gradio__svelte__internal, { onMount } = window.__gradio__svelte__internal;
-function create_fragment$4(t) {
- let e, i, n, s, l, h, _, c, o, r, T, S;
- return {
- c() {
- e = element$4("div"), i = svg_element("svg"), n = svg_element("g"), s = svg_element("path"), l = svg_element("path"), h = svg_element("path"), _ = svg_element("path"), c = svg_element("g"), o = svg_element("path"), r = svg_element("path"), T = svg_element("path"), S = svg_element("path"), attr$4(s, "d", "M255.926 0.754768L509.702 139.936V221.027L255.926 81.8465V0.754768Z"), attr$4(s, "fill", "#FF7C00"), attr$4(s, "fill-opacity", "0.4"), attr$4(s, "class", "svelte-43sxxs"), attr$4(l, "d", "M509.69 139.936L254.981 279.641V361.255L509.69 221.55V139.936Z"), attr$4(l, "fill", "#FF7C00"), attr$4(l, "class", "svelte-43sxxs"), attr$4(h, "d", "M0.250138 139.937L254.981 279.641V361.255L0.250138 221.55V139.937Z"), attr$4(h, "fill", "#FF7C00"), attr$4(h, "fill-opacity", "0.4"), attr$4(h, "class", "svelte-43sxxs"), attr$4(_, "d", "M255.923 0.232622L0.236328 139.936V221.55L255.923 81.8469V0.232622Z"), attr$4(_, "fill", "#FF7C00"), attr$4(_, "class", "svelte-43sxxs"), set_style$3(n, "transform", "translate(" + /*$top*/
- t[1][0] + "px, " + /*$top*/
- t[1][1] + "px)"), attr$4(o, "d", "M255.926 141.5L509.702 280.681V361.773L255.926 222.592V141.5Z"), attr$4(o, "fill", "#FF7C00"), attr$4(o, "fill-opacity", "0.4"), attr$4(o, "class", "svelte-43sxxs"), attr$4(r, "d", "M509.69 280.679L254.981 420.384V501.998L509.69 362.293V280.679Z"), attr$4(r, "fill", "#FF7C00"), attr$4(r, "class", "svelte-43sxxs"), attr$4(T, "d", "M0.250138 280.681L254.981 420.386V502L0.250138 362.295V280.681Z"), attr$4(T, "fill", "#FF7C00"), attr$4(T, "fill-opacity", "0.4"), attr$4(T, "class", "svelte-43sxxs"), attr$4(S, "d", "M255.923 140.977L0.236328 280.68V362.294L255.923 222.591V140.977Z"), attr$4(S, "fill", "#FF7C00"), attr$4(S, "class", "svelte-43sxxs"), set_style$3(c, "transform", "translate(" + /*$bottom*/
- t[2][0] + "px, " + /*$bottom*/
- t[2][1] + "px)"), attr$4(i, "viewBox", "-1200 -1200 3000 3000"), attr$4(i, "fill", "none"), attr$4(i, "xmlns", "http://www.w3.org/2000/svg"), attr$4(i, "class", "svelte-43sxxs"), attr$4(e, "class", "svelte-43sxxs"), toggle_class$3(
- e,
- "margin",
- /*margin*/
- t[0]
- );
- },
- m(w, C) {
- insert$4(w, e, C), append$4(e, i), append$4(i, n), append$4(n, s), append$4(n, l), append$4(n, h), append$4(n, _), append$4(i, c), append$4(c, o), append$4(c, r), append$4(c, T), append$4(c, S);
- },
- p(w, [C]) {
- C & /*$top*/
- 2 && set_style$3(n, "transform", "translate(" + /*$top*/
- w[1][0] + "px, " + /*$top*/
- w[1][1] + "px)"), C & /*$bottom*/
- 4 && set_style$3(c, "transform", "translate(" + /*$bottom*/
- w[2][0] + "px, " + /*$bottom*/
- w[2][1] + "px)"), C & /*margin*/
- 1 && toggle_class$3(
- e,
- "margin",
- /*margin*/
- w[0]
- );
- },
- i: noop$1,
- o: noop$1,
- d(w) {
- w && detach$4(e);
- }
- };
-}
-function instance$4(t, e, i) {
- let n, s, { margin: l = !0 } = e;
- const h = spring([0, 0]);
- component_subscribe(t, h, (S) => i(1, n = S));
- const _ = spring([0, 0]);
- component_subscribe(t, _, (S) => i(2, s = S));
- let c;
- async function o() {
- await Promise.all([h.set([125, 140]), _.set([-125, -140])]), await Promise.all([h.set([-125, 140]), _.set([125, -140])]), await Promise.all([h.set([-125, 0]), _.set([125, -0])]), await Promise.all([h.set([125, 0]), _.set([-125, 0])]);
- }
- async function r() {
- await o(), c || r();
- }
- async function T() {
- await Promise.all([h.set([125, 0]), _.set([-125, 0])]), r();
- }
- return onMount(() => (T(), () => c = !0)), t.$$set = (S) => {
- "margin" in S && i(0, l = S.margin);
- }, [l, n, s, h, _];
-}
-class Loader extends SvelteComponent$4 {
- constructor(e) {
- super(), init$4(this, e, instance$4, create_fragment$4, safe_not_equal$4, { margin: 0 });
- }
-}
-const index_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$3,
- append: append$3,
- attr: attr$3,
- binding_callbacks: binding_callbacks$2,
- check_outros: check_outros$2,
- create_component: create_component$2,
- create_slot: create_slot$1,
- destroy_component: destroy_component$2,
- destroy_each,
- detach: detach$3,
- element: element$3,
- empty: empty$1,
- ensure_array_like,
- get_all_dirty_from_scope: get_all_dirty_from_scope$1,
- get_slot_changes: get_slot_changes$1,
- group_outros: group_outros$2,
- init: init$3,
- insert: insert$3,
- mount_component: mount_component$2,
- noop,
- safe_not_equal: safe_not_equal$3,
- set_data: set_data$1,
- set_style: set_style$2,
- space: space$3,
- text: text$1,
- toggle_class: toggle_class$2,
- transition_in: transition_in$3,
- transition_out: transition_out$3,
- update_slot_base: update_slot_base$1
-} = window.__gradio__svelte__internal, { tick: tick$2 } = window.__gradio__svelte__internal, { onDestroy } = window.__gradio__svelte__internal, get_error_slot_changes = (t) => ({}), get_error_slot_context = (t) => ({});
-function get_each_context(t, e, i) {
- const n = t.slice();
- return n[38] = e[i], n[40] = i, n;
-}
-function get_each_context_1(t, e, i) {
- const n = t.slice();
- return n[38] = e[i], n;
-}
-function create_if_block_17(t) {
- let e, i = (
- /*i18n*/
- t[1]("common.error") + ""
- ), n, s, l;
- const h = (
- /*#slots*/
- t[29].error
- ), _ = create_slot$1(
- h,
- t,
- /*$$scope*/
- t[28],
- get_error_slot_context
- );
- return {
- c() {
- e = element$3("span"), n = text$1(i), s = space$3(), _ && _.c(), attr$3(e, "class", "error svelte-14miwb5");
- },
- m(c, o) {
- insert$3(c, e, o), append$3(e, n), insert$3(c, s, o), _ && _.m(c, o), l = !0;
- },
- p(c, o) {
- (!l || o[0] & /*i18n*/
- 2) && i !== (i = /*i18n*/
- c[1]("common.error") + "") && set_data$1(n, i), _ && _.p && (!l || o[0] & /*$$scope*/
- 268435456) && update_slot_base$1(
- _,
- h,
- c,
- /*$$scope*/
- c[28],
- l ? get_slot_changes$1(
- h,
- /*$$scope*/
- c[28],
- o,
- get_error_slot_changes
- ) : get_all_dirty_from_scope$1(
- /*$$scope*/
- c[28]
- ),
- get_error_slot_context
- );
- },
- i(c) {
- l || (transition_in$3(_, c), l = !0);
- },
- o(c) {
- transition_out$3(_, c), l = !1;
- },
- d(c) {
- c && (detach$3(e), detach$3(s)), _ && _.d(c);
- }
- };
-}
-function create_if_block$2(t) {
- let e, i, n, s, l, h, _, c, o, r = (
- /*variant*/
- t[8] === "default" && /*show_eta_bar*/
- t[18] && /*show_progress*/
- t[6] === "full" && create_if_block_16(t)
- );
- function T(x, y) {
- if (
- /*progress*/
- x[7]
- )
- return create_if_block_11;
- if (
- /*queue_position*/
- x[2] !== null && /*queue_size*/
- x[3] !== void 0 && /*queue_position*/
- x[2] >= 0
- )
- return create_if_block_14;
- if (
- /*queue_position*/
- x[2] === 0
- )
- return create_if_block_15;
- }
- let S = T(t), w = S && S(t), C = (
- /*timer*/
- t[5] && create_if_block_10(t)
- );
- const P = [create_if_block_2, create_if_block_9], b = [];
- function k(x, y) {
- return (
- /*last_progress_level*/
- x[15] != null ? 0 : (
- /*show_progress*/
- x[6] === "full" ? 1 : -1
- )
- );
- }
- ~(l = k(t)) && (h = b[l] = P[l](t));
- let F = !/*timer*/
- t[5] && create_if_block_1$2(t);
- return {
- c() {
- r && r.c(), e = space$3(), i = element$3("div"), w && w.c(), n = space$3(), C && C.c(), s = space$3(), h && h.c(), _ = space$3(), F && F.c(), c = empty$1(), attr$3(i, "class", "progress-text svelte-14miwb5"), toggle_class$2(
- i,
- "meta-text-center",
- /*variant*/
- t[8] === "center"
- ), toggle_class$2(
- i,
- "meta-text",
- /*variant*/
- t[8] === "default"
- );
- },
- m(x, y) {
- r && r.m(x, y), insert$3(x, e, y), insert$3(x, i, y), w && w.m(i, null), append$3(i, n), C && C.m(i, null), insert$3(x, s, y), ~l && b[l].m(x, y), insert$3(x, _, y), F && F.m(x, y), insert$3(x, c, y), o = !0;
- },
- p(x, y) {
- /*variant*/
- x[8] === "default" && /*show_eta_bar*/
- x[18] && /*show_progress*/
- x[6] === "full" ? r ? r.p(x, y) : (r = create_if_block_16(x), r.c(), r.m(e.parentNode, e)) : r && (r.d(1), r = null), S === (S = T(x)) && w ? w.p(x, y) : (w && w.d(1), w = S && S(x), w && (w.c(), w.m(i, n))), /*timer*/
- x[5] ? C ? C.p(x, y) : (C = create_if_block_10(x), C.c(), C.m(i, null)) : C && (C.d(1), C = null), (!o || y[0] & /*variant*/
- 256) && toggle_class$2(
- i,
- "meta-text-center",
- /*variant*/
- x[8] === "center"
- ), (!o || y[0] & /*variant*/
- 256) && toggle_class$2(
- i,
- "meta-text",
- /*variant*/
- x[8] === "default"
- );
- let p = l;
- l = k(x), l === p ? ~l && b[l].p(x, y) : (h && (group_outros$2(), transition_out$3(b[p], 1, 1, () => {
- b[p] = null;
- }), check_outros$2()), ~l ? (h = b[l], h ? h.p(x, y) : (h = b[l] = P[l](x), h.c()), transition_in$3(h, 1), h.m(_.parentNode, _)) : h = null), /*timer*/
- x[5] ? F && (F.d(1), F = null) : F ? F.p(x, y) : (F = create_if_block_1$2(x), F.c(), F.m(c.parentNode, c));
- },
- i(x) {
- o || (transition_in$3(h), o = !0);
- },
- o(x) {
- transition_out$3(h), o = !1;
- },
- d(x) {
- x && (detach$3(e), detach$3(i), detach$3(s), detach$3(_), detach$3(c)), r && r.d(x), w && w.d(), C && C.d(), ~l && b[l].d(x), F && F.d(x);
- }
- };
-}
-function create_if_block_16(t) {
- let e, i = `translateX(${/*eta_level*/
- (t[17] || 0) * 100 - 100}%)`;
- return {
- c() {
- e = element$3("div"), attr$3(e, "class", "eta-bar svelte-14miwb5"), set_style$2(e, "transform", i);
- },
- m(n, s) {
- insert$3(n, e, s);
- },
- p(n, s) {
- s[0] & /*eta_level*/
- 131072 && i !== (i = `translateX(${/*eta_level*/
- (n[17] || 0) * 100 - 100}%)`) && set_style$2(e, "transform", i);
- },
- d(n) {
- n && detach$3(e);
- }
- };
-}
-function create_if_block_15(t) {
- let e;
- return {
- c() {
- e = text$1("processing |");
- },
- m(i, n) {
- insert$3(i, e, n);
- },
- p: noop,
- d(i) {
- i && detach$3(e);
- }
- };
-}
-function create_if_block_14(t) {
- let e, i = (
- /*queue_position*/
- t[2] + 1 + ""
- ), n, s, l, h;
- return {
- c() {
- e = text$1("queue: "), n = text$1(i), s = text$1("/"), l = text$1(
- /*queue_size*/
- t[3]
- ), h = text$1(" |");
- },
- m(_, c) {
- insert$3(_, e, c), insert$3(_, n, c), insert$3(_, s, c), insert$3(_, l, c), insert$3(_, h, c);
- },
- p(_, c) {
- c[0] & /*queue_position*/
- 4 && i !== (i = /*queue_position*/
- _[2] + 1 + "") && set_data$1(n, i), c[0] & /*queue_size*/
- 8 && set_data$1(
- l,
- /*queue_size*/
- _[3]
- );
- },
- d(_) {
- _ && (detach$3(e), detach$3(n), detach$3(s), detach$3(l), detach$3(h));
- }
- };
-}
-function create_if_block_11(t) {
- let e, i = ensure_array_like(
- /*progress*/
- t[7]
- ), n = [];
- for (let s = 0; s < i.length; s += 1)
- n[s] = create_each_block_1(get_each_context_1(t, i, s));
- return {
- c() {
- for (let s = 0; s < n.length; s += 1)
- n[s].c();
- e = empty$1();
- },
- m(s, l) {
- for (let h = 0; h < n.length; h += 1)
- n[h] && n[h].m(s, l);
- insert$3(s, e, l);
- },
- p(s, l) {
- if (l[0] & /*progress*/
- 128) {
- i = ensure_array_like(
- /*progress*/
- s[7]
- );
- let h;
- for (h = 0; h < i.length; h += 1) {
- const _ = get_each_context_1(s, i, h);
- n[h] ? n[h].p(_, l) : (n[h] = create_each_block_1(_), n[h].c(), n[h].m(e.parentNode, e));
- }
- for (; h < n.length; h += 1)
- n[h].d(1);
- n.length = i.length;
- }
- },
- d(s) {
- s && detach$3(e), destroy_each(n, s);
- }
- };
-}
-function create_if_block_12(t) {
- let e, i = (
- /*p*/
- t[38].unit + ""
- ), n, s, l = " ", h;
- function _(r, T) {
- return (
- /*p*/
- r[38].length != null ? create_if_block_13 : create_else_block$1
- );
- }
- let c = _(t), o = c(t);
- return {
- c() {
- o.c(), e = space$3(), n = text$1(i), s = text$1(" | "), h = text$1(l);
- },
- m(r, T) {
- o.m(r, T), insert$3(r, e, T), insert$3(r, n, T), insert$3(r, s, T), insert$3(r, h, T);
- },
- p(r, T) {
- c === (c = _(r)) && o ? o.p(r, T) : (o.d(1), o = c(r), o && (o.c(), o.m(e.parentNode, e))), T[0] & /*progress*/
- 128 && i !== (i = /*p*/
- r[38].unit + "") && set_data$1(n, i);
- },
- d(r) {
- r && (detach$3(e), detach$3(n), detach$3(s), detach$3(h)), o.d(r);
- }
- };
-}
-function create_else_block$1(t) {
- let e = pretty_si(
- /*p*/
- t[38].index || 0
- ) + "", i;
- return {
- c() {
- i = text$1(e);
- },
- m(n, s) {
- insert$3(n, i, s);
- },
- p(n, s) {
- s[0] & /*progress*/
- 128 && e !== (e = pretty_si(
- /*p*/
- n[38].index || 0
- ) + "") && set_data$1(i, e);
- },
- d(n) {
- n && detach$3(i);
- }
- };
-}
-function create_if_block_13(t) {
- let e = pretty_si(
- /*p*/
- t[38].index || 0
- ) + "", i, n, s = pretty_si(
- /*p*/
- t[38].length
- ) + "", l;
- return {
- c() {
- i = text$1(e), n = text$1("/"), l = text$1(s);
- },
- m(h, _) {
- insert$3(h, i, _), insert$3(h, n, _), insert$3(h, l, _);
- },
- p(h, _) {
- _[0] & /*progress*/
- 128 && e !== (e = pretty_si(
- /*p*/
- h[38].index || 0
- ) + "") && set_data$1(i, e), _[0] & /*progress*/
- 128 && s !== (s = pretty_si(
- /*p*/
- h[38].length
- ) + "") && set_data$1(l, s);
- },
- d(h) {
- h && (detach$3(i), detach$3(n), detach$3(l));
- }
- };
-}
-function create_each_block_1(t) {
- let e, i = (
- /*p*/
- t[38].index != null && create_if_block_12(t)
- );
- return {
- c() {
- i && i.c(), e = empty$1();
- },
- m(n, s) {
- i && i.m(n, s), insert$3(n, e, s);
- },
- p(n, s) {
- /*p*/
- n[38].index != null ? i ? i.p(n, s) : (i = create_if_block_12(n), i.c(), i.m(e.parentNode, e)) : i && (i.d(1), i = null);
- },
- d(n) {
- n && detach$3(e), i && i.d(n);
- }
- };
-}
-function create_if_block_10(t) {
- let e, i = (
- /*eta*/
- t[0] ? `/${/*formatted_eta*/
- t[19]}` : ""
- ), n, s;
- return {
- c() {
- e = text$1(
- /*formatted_timer*/
- t[20]
- ), n = text$1(i), s = text$1("s");
- },
- m(l, h) {
- insert$3(l, e, h), insert$3(l, n, h), insert$3(l, s, h);
- },
- p(l, h) {
- h[0] & /*formatted_timer*/
- 1048576 && set_data$1(
- e,
- /*formatted_timer*/
- l[20]
- ), h[0] & /*eta, formatted_eta*/
- 524289 && i !== (i = /*eta*/
- l[0] ? `/${/*formatted_eta*/
- l[19]}` : "") && set_data$1(n, i);
- },
- d(l) {
- l && (detach$3(e), detach$3(n), detach$3(s));
- }
- };
-}
-function create_if_block_9(t) {
- let e, i;
- return e = new Loader({
- props: { margin: (
- /*variant*/
- t[8] === "default"
- ) }
- }), {
- c() {
- create_component$2(e.$$.fragment);
- },
- m(n, s) {
- mount_component$2(e, n, s), i = !0;
- },
- p(n, s) {
- const l = {};
- s[0] & /*variant*/
- 256 && (l.margin = /*variant*/
- n[8] === "default"), e.$set(l);
- },
- i(n) {
- i || (transition_in$3(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out$3(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component$2(e, n);
- }
- };
-}
-function create_if_block_2(t) {
- let e, i, n, s, l, h = `${/*last_progress_level*/
- t[15] * 100}%`, _ = (
- /*progress*/
- t[7] != null && create_if_block_3(t)
- );
- return {
- c() {
- e = element$3("div"), i = element$3("div"), _ && _.c(), n = space$3(), s = element$3("div"), l = element$3("div"), attr$3(i, "class", "progress-level-inner svelte-14miwb5"), attr$3(l, "class", "progress-bar svelte-14miwb5"), set_style$2(l, "width", h), attr$3(s, "class", "progress-bar-wrap svelte-14miwb5"), attr$3(e, "class", "progress-level svelte-14miwb5");
- },
- m(c, o) {
- insert$3(c, e, o), append$3(e, i), _ && _.m(i, null), append$3(e, n), append$3(e, s), append$3(s, l), t[30](l);
- },
- p(c, o) {
- /*progress*/
- c[7] != null ? _ ? _.p(c, o) : (_ = create_if_block_3(c), _.c(), _.m(i, null)) : _ && (_.d(1), _ = null), o[0] & /*last_progress_level*/
- 32768 && h !== (h = `${/*last_progress_level*/
- c[15] * 100}%`) && set_style$2(l, "width", h);
- },
- i: noop,
- o: noop,
- d(c) {
- c && detach$3(e), _ && _.d(), t[30](null);
- }
- };
-}
-function create_if_block_3(t) {
- let e, i = ensure_array_like(
- /*progress*/
- t[7]
- ), n = [];
- for (let s = 0; s < i.length; s += 1)
- n[s] = create_each_block(get_each_context(t, i, s));
- return {
- c() {
- for (let s = 0; s < n.length; s += 1)
- n[s].c();
- e = empty$1();
- },
- m(s, l) {
- for (let h = 0; h < n.length; h += 1)
- n[h] && n[h].m(s, l);
- insert$3(s, e, l);
- },
- p(s, l) {
- if (l[0] & /*progress_level, progress*/
- 16512) {
- i = ensure_array_like(
- /*progress*/
- s[7]
- );
- let h;
- for (h = 0; h < i.length; h += 1) {
- const _ = get_each_context(s, i, h);
- n[h] ? n[h].p(_, l) : (n[h] = create_each_block(_), n[h].c(), n[h].m(e.parentNode, e));
- }
- for (; h < n.length; h += 1)
- n[h].d(1);
- n.length = i.length;
- }
- },
- d(s) {
- s && detach$3(e), destroy_each(n, s);
- }
- };
-}
-function create_if_block_4(t) {
- let e, i, n, s, l = (
- /*i*/
- t[40] !== 0 && create_if_block_8()
- ), h = (
- /*p*/
- t[38].desc != null && create_if_block_7(t)
- ), _ = (
- /*p*/
- t[38].desc != null && /*progress_level*/
- t[14] && /*progress_level*/
- t[14][
- /*i*/
- t[40]
- ] != null && create_if_block_6()
- ), c = (
- /*progress_level*/
- t[14] != null && create_if_block_5(t)
- );
- return {
- c() {
- l && l.c(), e = space$3(), h && h.c(), i = space$3(), _ && _.c(), n = space$3(), c && c.c(), s = empty$1();
- },
- m(o, r) {
- l && l.m(o, r), insert$3(o, e, r), h && h.m(o, r), insert$3(o, i, r), _ && _.m(o, r), insert$3(o, n, r), c && c.m(o, r), insert$3(o, s, r);
- },
- p(o, r) {
- /*p*/
- o[38].desc != null ? h ? h.p(o, r) : (h = create_if_block_7(o), h.c(), h.m(i.parentNode, i)) : h && (h.d(1), h = null), /*p*/
- o[38].desc != null && /*progress_level*/
- o[14] && /*progress_level*/
- o[14][
- /*i*/
- o[40]
- ] != null ? _ || (_ = create_if_block_6(), _.c(), _.m(n.parentNode, n)) : _ && (_.d(1), _ = null), /*progress_level*/
- o[14] != null ? c ? c.p(o, r) : (c = create_if_block_5(o), c.c(), c.m(s.parentNode, s)) : c && (c.d(1), c = null);
- },
- d(o) {
- o && (detach$3(e), detach$3(i), detach$3(n), detach$3(s)), l && l.d(o), h && h.d(o), _ && _.d(o), c && c.d(o);
- }
- };
-}
-function create_if_block_8(t) {
- let e;
- return {
- c() {
- e = text$1(" /");
- },
- m(i, n) {
- insert$3(i, e, n);
- },
- d(i) {
- i && detach$3(e);
- }
- };
-}
-function create_if_block_7(t) {
- let e = (
- /*p*/
- t[38].desc + ""
- ), i;
- return {
- c() {
- i = text$1(e);
- },
- m(n, s) {
- insert$3(n, i, s);
- },
- p(n, s) {
- s[0] & /*progress*/
- 128 && e !== (e = /*p*/
- n[38].desc + "") && set_data$1(i, e);
- },
- d(n) {
- n && detach$3(i);
- }
- };
-}
-function create_if_block_6(t) {
- let e;
- return {
- c() {
- e = text$1("-");
- },
- m(i, n) {
- insert$3(i, e, n);
- },
- d(i) {
- i && detach$3(e);
- }
- };
-}
-function create_if_block_5(t) {
- let e = (100 * /*progress_level*/
- (t[14][
- /*i*/
- t[40]
- ] || 0)).toFixed(1) + "", i, n;
- return {
- c() {
- i = text$1(e), n = text$1("%");
- },
- m(s, l) {
- insert$3(s, i, l), insert$3(s, n, l);
- },
- p(s, l) {
- l[0] & /*progress_level*/
- 16384 && e !== (e = (100 * /*progress_level*/
- (s[14][
- /*i*/
- s[40]
- ] || 0)).toFixed(1) + "") && set_data$1(i, e);
- },
- d(s) {
- s && (detach$3(i), detach$3(n));
- }
- };
-}
-function create_each_block(t) {
- let e, i = (
- /*p*/
- (t[38].desc != null || /*progress_level*/
- t[14] && /*progress_level*/
- t[14][
- /*i*/
- t[40]
- ] != null) && create_if_block_4(t)
- );
- return {
- c() {
- i && i.c(), e = empty$1();
- },
- m(n, s) {
- i && i.m(n, s), insert$3(n, e, s);
- },
- p(n, s) {
- /*p*/
- n[38].desc != null || /*progress_level*/
- n[14] && /*progress_level*/
- n[14][
- /*i*/
- n[40]
- ] != null ? i ? i.p(n, s) : (i = create_if_block_4(n), i.c(), i.m(e.parentNode, e)) : i && (i.d(1), i = null);
- },
- d(n) {
- n && detach$3(e), i && i.d(n);
- }
- };
-}
-function create_if_block_1$2(t) {
- let e, i;
- return {
- c() {
- e = element$3("p"), i = text$1(
- /*loading_text*/
- t[9]
- ), attr$3(e, "class", "loading svelte-14miwb5");
- },
- m(n, s) {
- insert$3(n, e, s), append$3(e, i);
- },
- p(n, s) {
- s[0] & /*loading_text*/
- 512 && set_data$1(
- i,
- /*loading_text*/
- n[9]
- );
- },
- d(n) {
- n && detach$3(e);
- }
- };
-}
-function create_fragment$3(t) {
- let e, i, n, s, l;
- const h = [create_if_block$2, create_if_block_17], _ = [];
- function c(o, r) {
- return (
- /*status*/
- o[4] === "pending" ? 0 : (
- /*status*/
- o[4] === "error" ? 1 : -1
- )
- );
- }
- return ~(i = c(t)) && (n = _[i] = h[i](t)), {
- c() {
- e = element$3("div"), n && n.c(), attr$3(e, "class", s = "wrap " + /*variant*/
- t[8] + " " + /*show_progress*/
- t[6] + " svelte-14miwb5"), toggle_class$2(e, "hide", !/*status*/
- t[4] || /*status*/
- t[4] === "complete" || /*show_progress*/
- t[6] === "hidden"), toggle_class$2(
- e,
- "translucent",
- /*variant*/
- t[8] === "center" && /*status*/
- (t[4] === "pending" || /*status*/
- t[4] === "error") || /*translucent*/
- t[11] || /*show_progress*/
- t[6] === "minimal"
- ), toggle_class$2(
- e,
- "generating",
- /*status*/
- t[4] === "generating"
- ), toggle_class$2(
- e,
- "border",
- /*border*/
- t[12]
- ), set_style$2(
- e,
- "position",
- /*absolute*/
- t[10] ? "absolute" : "static"
- ), set_style$2(
- e,
- "padding",
- /*absolute*/
- t[10] ? "0" : "var(--size-8) 0"
- );
- },
- m(o, r) {
- insert$3(o, e, r), ~i && _[i].m(e, null), t[31](e), l = !0;
- },
- p(o, r) {
- let T = i;
- i = c(o), i === T ? ~i && _[i].p(o, r) : (n && (group_outros$2(), transition_out$3(_[T], 1, 1, () => {
- _[T] = null;
- }), check_outros$2()), ~i ? (n = _[i], n ? n.p(o, r) : (n = _[i] = h[i](o), n.c()), transition_in$3(n, 1), n.m(e, null)) : n = null), (!l || r[0] & /*variant, show_progress*/
- 320 && s !== (s = "wrap " + /*variant*/
- o[8] + " " + /*show_progress*/
- o[6] + " svelte-14miwb5")) && attr$3(e, "class", s), (!l || r[0] & /*variant, show_progress, status, show_progress*/
- 336) && toggle_class$2(e, "hide", !/*status*/
- o[4] || /*status*/
- o[4] === "complete" || /*show_progress*/
- o[6] === "hidden"), (!l || r[0] & /*variant, show_progress, variant, status, translucent, show_progress*/
- 2384) && toggle_class$2(
- e,
- "translucent",
- /*variant*/
- o[8] === "center" && /*status*/
- (o[4] === "pending" || /*status*/
- o[4] === "error") || /*translucent*/
- o[11] || /*show_progress*/
- o[6] === "minimal"
- ), (!l || r[0] & /*variant, show_progress, status*/
- 336) && toggle_class$2(
- e,
- "generating",
- /*status*/
- o[4] === "generating"
- ), (!l || r[0] & /*variant, show_progress, border*/
- 4416) && toggle_class$2(
- e,
- "border",
- /*border*/
- o[12]
- ), r[0] & /*absolute*/
- 1024 && set_style$2(
- e,
- "position",
- /*absolute*/
- o[10] ? "absolute" : "static"
- ), r[0] & /*absolute*/
- 1024 && set_style$2(
- e,
- "padding",
- /*absolute*/
- o[10] ? "0" : "var(--size-8) 0"
- );
- },
- i(o) {
- l || (transition_in$3(n), l = !0);
- },
- o(o) {
- transition_out$3(n), l = !1;
- },
- d(o) {
- o && detach$3(e), ~i && _[i].d(), t[31](null);
- }
- };
-}
-let items = [], called = !1;
-async function scroll_into_view(t, e = !0) {
- if (!(window.__gradio_mode__ === "website" || window.__gradio_mode__ !== "app" && e !== !0)) {
- if (items.push(t), !called)
- called = !0;
- else
- return;
- await tick$2(), requestAnimationFrame(() => {
- let i = [0, 0];
- for (let n = 0; n < items.length; n++) {
- const l = items[n].getBoundingClientRect();
- (n === 0 || l.top + window.scrollY <= i[0]) && (i[0] = l.top + window.scrollY, i[1] = n);
- }
- window.scrollTo({ top: i[0] - 20, behavior: "smooth" }), called = !1, items = [];
- });
- }
-}
-function instance$3(t, e, i) {
- let n, { $$slots: s = {}, $$scope: l } = e, { i18n: h } = e, { eta: _ = null } = e, { queue: c = !1 } = e, { queue_position: o } = e, { queue_size: r } = e, { status: T } = e, { scroll_to_output: S = !1 } = e, { timer: w = !0 } = e, { show_progress: C = "full" } = e, { message: P = null } = e, { progress: b = null } = e, { variant: k = "default" } = e, { loading_text: F = "Loading..." } = e, { absolute: x = !0 } = e, { translucent: y = !1 } = e, { border: p = !1 } = e, { autoscroll: E } = e, $, M = !1, m = 0, N = 0, D = null, X = 0, G = null, I, B = null, ee = !0;
- const Y = () => {
- i(25, m = performance.now()), i(26, N = 0), M = !0, q();
- };
- function q() {
- requestAnimationFrame(() => {
- i(26, N = (performance.now() - m) / 1e3), M && q();
- });
- }
- function le() {
- i(26, N = 0), M && (M = !1);
- }
- onDestroy(() => {
- M && le();
- });
- let pe = null;
- function we(R) {
- binding_callbacks$2[R ? "unshift" : "push"](() => {
- B = R, i(16, B), i(7, b), i(14, G), i(15, I);
- });
- }
- function be(R) {
- binding_callbacks$2[R ? "unshift" : "push"](() => {
- $ = R, i(13, $);
- });
- }
- return t.$$set = (R) => {
- "i18n" in R && i(1, h = R.i18n), "eta" in R && i(0, _ = R.eta), "queue" in R && i(21, c = R.queue), "queue_position" in R && i(2, o = R.queue_position), "queue_size" in R && i(3, r = R.queue_size), "status" in R && i(4, T = R.status), "scroll_to_output" in R && i(22, S = R.scroll_to_output), "timer" in R && i(5, w = R.timer), "show_progress" in R && i(6, C = R.show_progress), "message" in R && i(23, P = R.message), "progress" in R && i(7, b = R.progress), "variant" in R && i(8, k = R.variant), "loading_text" in R && i(9, F = R.loading_text), "absolute" in R && i(10, x = R.absolute), "translucent" in R && i(11, y = R.translucent), "border" in R && i(12, p = R.border), "autoscroll" in R && i(24, E = R.autoscroll), "$$scope" in R && i(28, l = R.$$scope);
- }, t.$$.update = () => {
- t.$$.dirty[0] & /*eta, old_eta, queue, timer_start*/
- 169869313 && (_ === null ? i(0, _ = D) : c && i(0, _ = (performance.now() - m) / 1e3 + _), _ != null && (i(19, pe = _.toFixed(1)), i(27, D = _))), t.$$.dirty[0] & /*eta, timer_diff*/
- 67108865 && i(17, X = _ === null || _ <= 0 || !N ? null : Math.min(N / _, 1)), t.$$.dirty[0] & /*progress*/
- 128 && b != null && i(18, ee = !1), t.$$.dirty[0] & /*progress, progress_level, progress_bar, last_progress_level*/
- 114816 && (b != null ? i(14, G = b.map((R) => {
- if (R.index != null && R.length != null)
- return R.index / R.length;
- if (R.progress != null)
- return R.progress;
- })) : i(14, G = null), G ? (i(15, I = G[G.length - 1]), B && (I === 0 ? i(16, B.style.transition = "0", B) : i(16, B.style.transition = "150ms", B))) : i(15, I = void 0)), t.$$.dirty[0] & /*status*/
- 16 && (T === "pending" ? Y() : le()), t.$$.dirty[0] & /*el, scroll_to_output, status, autoscroll*/
- 20979728 && $ && S && (T === "pending" || T === "complete") && scroll_into_view($, E), t.$$.dirty[0] & /*status, message*/
- 8388624, t.$$.dirty[0] & /*timer_diff*/
- 67108864 && i(20, n = N.toFixed(1));
- }, [
- _,
- h,
- o,
- r,
- T,
- w,
- C,
- b,
- k,
- F,
- x,
- y,
- p,
- $,
- G,
- I,
- B,
- X,
- ee,
- pe,
- n,
- c,
- S,
- P,
- E,
- m,
- N,
- D,
- l,
- s,
- we,
- be
- ];
-}
-class Static extends SvelteComponent$3 {
- constructor(e) {
- super(), init$3(
- this,
- e,
- instance$3,
- create_fragment$3,
- safe_not_equal$3,
- {
- i18n: 1,
- eta: 0,
- queue: 21,
- queue_position: 2,
- queue_size: 3,
- status: 4,
- scroll_to_output: 22,
- timer: 5,
- show_progress: 6,
- message: 23,
- progress: 7,
- variant: 8,
- loading_text: 9,
- absolute: 10,
- translucent: 11,
- border: 12,
- autoscroll: 24
- },
- null,
- [-1, -1]
- );
- }
-}
-const ToastContent_svelte_svelte_type_style_lang = "", Toast_svelte_svelte_type_style_lang = "";
-var fn = new Intl.Collator(0, { numeric: 1 }).compare;
-function semiver(t, e, i) {
- return t = t.split("."), e = e.split("."), fn(t[0], e[0]) || fn(t[1], e[1]) || (e[2] = e.slice(2).join("."), i = /[.-]/.test(t[2] = t.slice(2).join(".")), i == /[.-]/.test(e[2]) ? fn(t[2], e[2]) : i ? -1 : 1);
-}
-function resolve_root(t, e, i) {
- return e.startsWith("http://") || e.startsWith("https://") ? i ? t : e : t + e;
-}
-function determine_protocol(t) {
- if (t.startsWith("http")) {
- const { protocol: e, host: i } = new URL(t);
- return i.endsWith("hf.space") ? {
- ws_protocol: "wss",
- host: i,
- http_protocol: e
- } : {
- ws_protocol: e === "https:" ? "wss" : "ws",
- http_protocol: e,
- host: i
- };
- } else if (t.startsWith("file:"))
- return {
- ws_protocol: "ws",
- http_protocol: "http:",
- host: "lite.local"
- // Special fake hostname only used for this case. This matches the hostname allowed in `is_self_host()` in `js/wasm/network/host.ts`.
- };
- return {
- ws_protocol: "wss",
- http_protocol: "https:",
- host: t
- };
-}
-const RE_SPACE_NAME = /^[^\/]*\/[^\/]*$/, RE_SPACE_DOMAIN = /.*hf\.space\/{0,1}$/;
-async function process_endpoint(t, e) {
- const i = {};
- e && (i.Authorization = `Bearer ${e}`);
- const n = t.trim();
- if (RE_SPACE_NAME.test(n))
- try {
- const s = await fetch(
- `https://huggingface.co/api/spaces/${n}/host`,
- { headers: i }
- );
- if (s.status !== 200)
- throw new Error("Space metadata could not be loaded.");
- const l = (await s.json()).host;
- return {
- space_id: t,
- ...determine_protocol(l)
- };
- } catch (s) {
- throw new Error("Space metadata could not be loaded." + s.message);
- }
- if (RE_SPACE_DOMAIN.test(n)) {
- const { ws_protocol: s, http_protocol: l, host: h } = determine_protocol(n);
- return {
- space_id: h.replace(".hf.space", ""),
- ws_protocol: s,
- http_protocol: l,
- host: h
- };
- }
- return {
- space_id: !1,
- ...determine_protocol(n)
- };
-}
-function map_names_to_ids(t) {
- let e = {};
- return t.forEach(({ api_name: i }, n) => {
- i && (e[i] = n);
- }), e;
-}
-const RE_DISABLED_DISCUSSION = /^(?=[^]*\b[dD]iscussions{0,1}\b)(?=[^]*\b[dD]isabled\b)[^]*$/;
-async function discussions_enabled(t) {
- try {
- const i = (await fetch(
- `https://huggingface.co/api/spaces/${t}/discussions`,
- {
- method: "HEAD"
- }
- )).headers.get("x-error-message");
- return !(i && RE_DISABLED_DISCUSSION.test(i));
- } catch {
- return !1;
- }
-}
-function normalise_file(t, e, i) {
- if (t == null)
- return null;
- if (Array.isArray(t)) {
- const n = [];
- for (const s of t)
- s == null ? n.push(null) : n.push(normalise_file(s, e, i));
- return n;
- }
- return t.is_stream ? i == null ? new FileData({
- ...t,
- url: e + "/stream/" + t.path
- }) : new FileData({
- ...t,
- url: "/proxy=" + i + "stream/" + t.path
- }) : new FileData({
- ...t,
- url: get_fetchable_url_or_file(t.path, e, i)
- });
-}
-function is_url(t) {
- try {
- const e = new URL(t);
- return e.protocol === "http:" || e.protocol === "https:";
- } catch {
- return !1;
- }
-}
-function get_fetchable_url_or_file(t, e, i) {
- return t == null ? i ? `/proxy=${i}file=` : `${e}/file=` : is_url(t) ? t : i ? `/proxy=${i}file=${t}` : `${e}/file=${t}`;
-}
-async function upload(t, e, i = upload_files) {
- let n = (Array.isArray(t) ? t : [t]).map(
- (s) => s.blob
- );
- return await Promise.all(
- await i(e, n).then(
- async (s) => {
- if (s.error)
- throw new Error(s.error);
- return s.files ? s.files.map((l, h) => {
- const _ = new FileData({ ...t[h], path: l });
- return normalise_file(_, e, null);
- }) : [];
- }
- )
- );
-}
-async function prepare_files(t, e) {
- return t.map(
- (i, n) => new FileData({
- path: i.name,
- orig_name: i.name,
- blob: i,
- size: i.size,
- mime_type: i.type,
- is_stream: e
- })
- );
-}
-class FileData {
- constructor({
- path: e,
- url: i,
- orig_name: n,
- size: s,
- blob: l,
- is_stream: h,
- mime_type: _,
- alt_text: c
- }) {
- this.path = e, this.url = i, this.orig_name = n, this.size = s, this.blob = i ? void 0 : l, this.is_stream = h, this.mime_type = _, this.alt_text = c;
- }
-}
-const QUEUE_FULL_MSG = "This application is too busy. Keep trying!", BROKEN_CONNECTION_MSG = "Connection errored out.";
-let NodeBlob;
-function api_factory(t, e) {
- return { post_data: i, upload_files: n, client: s, handle_blob: l };
- async function i(h, _, c) {
- const o = { "Content-Type": "application/json" };
- c && (o.Authorization = `Bearer ${c}`);
- try {
- var r = await t(h, {
- method: "POST",
- body: JSON.stringify(_),
- headers: o
- });
- } catch {
- return [{ error: BROKEN_CONNECTION_MSG }, 500];
- }
- return [await r.json(), r.status];
- }
- async function n(h, _, c) {
- const o = {};
- c && (o.Authorization = `Bearer ${c}`);
- const r = 1e3, T = [];
- for (let w = 0; w < _.length; w += r) {
- const C = _.slice(w, w + r), P = new FormData();
- C.forEach((k) => {
- P.append("files", k);
- });
- try {
- var S = await t(`${h}/upload`, {
- method: "POST",
- body: P,
- headers: o
- });
- } catch {
- return { error: BROKEN_CONNECTION_MSG };
- }
- const b = await S.json();
- T.push(...b);
- }
- return { files: T };
- }
- async function s(h, _ = { normalise_files: !0 }) {
- return new Promise(async (c) => {
- const { status_callback: o, hf_token: r, normalise_files: T } = _, S = {
- predict: N,
- submit: D,
- view_api: G,
- component_server: X
- }, w = T ?? !0;
- if ((typeof window > "u" || !("WebSocket" in window)) && !global.Websocket) {
- const I = await import("./wrapper-98f94c21-523a3923.js");
- NodeBlob = (await Promise.resolve().then(() => __viteBrowserExternal)).Blob, global.WebSocket = I.WebSocket;
- }
- const { ws_protocol: C, http_protocol: P, host: b, space_id: k } = await process_endpoint(h, r), F = Math.random().toString(36).substring(2), x = {};
- let y, p = {}, E = !1;
- r && k && (E = await get_jwt(k, r));
- async function $(I) {
- if (y = I, p = map_names_to_ids((I == null ? void 0 : I.dependencies) || []), y.auth_required)
- return {
- config: y,
- ...S
- };
- try {
- M = await G(y);
- } catch (B) {
- console.error(`Could not get api details: ${B.message}`);
- }
- return {
- config: y,
- ...S
- };
- }
- let M;
- async function m(I) {
- if (o && o(I), I.status === "running")
- try {
- y = await resolve_config(
- t,
- `${P}//${b}`,
- r
- );
- const B = await $(y);
- c(B);
- } catch (B) {
- console.error(B), o && o({
- status: "error",
- message: "Could not load this space.",
- load_status: "error",
- detail: "NOT_FOUND"
- });
- }
- }
- try {
- y = await resolve_config(
- t,
- `${P}//${b}`,
- r
- );
- const I = await $(y);
- c(I);
- } catch (I) {
- console.error(I), k ? check_space_status(
- k,
- RE_SPACE_NAME.test(k) ? "space_name" : "subdomain",
- m
- ) : o && o({
- status: "error",
- message: "Could not load this space.",
- load_status: "error",
- detail: "NOT_FOUND"
- });
- }
- function N(I, B, ee) {
- let Y = !1, q = !1, le;
- if (typeof I == "number")
- le = y.dependencies[I];
- else {
- const pe = I.replace(/^\//, "");
- le = y.dependencies[p[pe]];
- }
- if (le.types.continuous)
- throw new Error(
- "Cannot call predict on this function as it may run forever. Use submit instead"
- );
- return new Promise((pe, we) => {
- const be = D(I, B, ee);
- let R;
- be.on("data", (d) => {
- q && (be.destroy(), pe(d)), Y = !0, R = d;
- }).on("status", (d) => {
- d.stage === "error" && we(d), d.stage === "complete" && (q = !0, Y && (be.destroy(), pe(R)));
- });
- });
- }
- function D(I, B, ee) {
- let Y, q;
- if (typeof I == "number")
- Y = I, q = M.unnamed_endpoints[Y];
- else {
- const Q = I.replace(/^\//, "");
- Y = p[Q], q = M.named_endpoints[I.trim()];
- }
- if (typeof Y != "number")
- throw new Error(
- "There is no endpoint matching that name of fn_index matching that number."
- );
- let le, pe, we = y.protocol ?? "sse";
- const be = typeof I == "number" ? "/predict" : I;
- let R, d = null, g = !1;
- const f = {};
- let v = "";
- typeof window < "u" && (v = new URLSearchParams(window.location.search).toString()), l(
- `${P}//${resolve_root(b, y.path, !0)}`,
- B,
- q,
- r
- ).then((Q) => {
- if (R = { data: Q || [], event_data: ee, fn_index: Y }, skip_queue(Y, y))
- A({
- type: "status",
- endpoint: be,
- stage: "pending",
- queue: !1,
- fn_index: Y,
- time: /* @__PURE__ */ new Date()
- }), i(
- `${P}//${resolve_root(b, y.path, !0)}/run${be.startsWith("/") ? be : `/${be}`}${v ? "?" + v : ""}`,
- {
- ...R,
- session_hash: F
- },
- r
- ).then(([ue, me]) => {
- const fe = w ? transform_output(
- ue.data,
- q,
- y.root,
- y.root_url
- ) : ue.data;
- me == 200 ? (A({
- type: "data",
- endpoint: be,
- fn_index: Y,
- data: fe,
- time: /* @__PURE__ */ new Date()
- }), A({
- type: "status",
- endpoint: be,
- fn_index: Y,
- stage: "complete",
- eta: ue.average_duration,
- queue: !1,
- time: /* @__PURE__ */ new Date()
- })) : A({
- type: "status",
- stage: "error",
- endpoint: be,
- fn_index: Y,
- message: ue.error,
- queue: !1,
- time: /* @__PURE__ */ new Date()
- });
- }).catch((ue) => {
- A({
- type: "status",
- stage: "error",
- message: ue.message,
- endpoint: be,
- fn_index: Y,
- queue: !1,
- time: /* @__PURE__ */ new Date()
- });
- });
- else if (we == "ws") {
- A({
- type: "status",
- stage: "pending",
- queue: !0,
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date()
- });
- let ue = new URL(`${C}://${resolve_root(
- b,
- y.path,
- !0
- )}/queue/join${v ? "?" + v : ""}`);
- E && ue.searchParams.set("__sign", E), le = e(ue), le.onclose = (me) => {
- me.wasClean || A({
- type: "status",
- stage: "error",
- broken: !0,
- message: BROKEN_CONNECTION_MSG,
- queue: !0,
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date()
- });
- }, le.onmessage = function(me) {
- const fe = JSON.parse(me.data), { type: Pe, status: Fe, data: Ee } = handle_message(
- fe,
- x[Y]
- );
- if (Pe === "update" && Fe && !g)
- A({
- type: "status",
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date(),
- ...Fe
- }), Fe.stage === "error" && le.close();
- else if (Pe === "hash") {
- le.send(JSON.stringify({ fn_index: Y, session_hash: F }));
- return;
- } else
- Pe === "data" ? le.send(JSON.stringify({ ...R, session_hash: F })) : Pe === "complete" ? g = Fe : Pe === "log" ? A({
- type: "log",
- log: Ee.log,
- level: Ee.level,
- endpoint: be,
- fn_index: Y
- }) : Pe === "generating" && A({
- type: "status",
- time: /* @__PURE__ */ new Date(),
- ...Fe,
- stage: Fe == null ? void 0 : Fe.stage,
- queue: !0,
- endpoint: be,
- fn_index: Y
- });
- Ee && (A({
- type: "data",
- time: /* @__PURE__ */ new Date(),
- data: w ? transform_output(
- Ee.data,
- q,
- y.root,
- y.root_url
- ) : Ee.data,
- endpoint: be,
- fn_index: Y
- }), g && (A({
- type: "status",
- time: /* @__PURE__ */ new Date(),
- ...g,
- stage: Fe == null ? void 0 : Fe.stage,
- queue: !0,
- endpoint: be,
- fn_index: Y
- }), le.close()));
- }, semiver(y.version || "2.0.0", "3.6") < 0 && addEventListener(
- "open",
- () => le.send(JSON.stringify({ hash: F }))
- );
- } else {
- A({
- type: "status",
- stage: "pending",
- queue: !0,
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date()
- });
- var ce = new URLSearchParams({
- fn_index: Y.toString(),
- session_hash: F
- }).toString();
- let ue = new URL(
- `${P}//${resolve_root(
- b,
- y.path,
- !0
- )}/queue/join?${ce}`
- );
- pe = new EventSource(ue), pe.onmessage = async function(me) {
- const fe = JSON.parse(me.data), { type: Pe, status: Fe, data: Ee } = handle_message(
- fe,
- x[Y]
- );
- if (Pe === "update" && Fe && !g)
- A({
- type: "status",
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date(),
- ...Fe
- }), Fe.stage === "error" && pe.close();
- else if (Pe === "data") {
- d = fe.event_id;
- let [De, _e] = await i(
- `${P}//${resolve_root(
- b,
- y.path,
- !0
- )}/queue/data`,
- {
- ...R,
- session_hash: F,
- event_id: d
- },
- r
- );
- _e !== 200 && (A({
- type: "status",
- stage: "error",
- message: BROKEN_CONNECTION_MSG,
- queue: !0,
- endpoint: be,
- fn_index: Y,
- time: /* @__PURE__ */ new Date()
- }), pe.close());
- } else
- Pe === "complete" ? g = Fe : Pe === "log" ? A({
- type: "log",
- log: Ee.log,
- level: Ee.level,
- endpoint: be,
- fn_index: Y
- }) : Pe === "generating" && A({
- type: "status",
- time: /* @__PURE__ */ new Date(),
- ...Fe,
- stage: Fe == null ? void 0 : Fe.stage,
- queue: !0,
- endpoint: be,
- fn_index: Y
- });
- Ee && (A({
- type: "data",
- time: /* @__PURE__ */ new Date(),
- data: w ? transform_output(
- Ee.data,
- q,
- y.root,
- y.root_url
- ) : Ee.data,
- endpoint: be,
- fn_index: Y
- }), g && (A({
- type: "status",
- time: /* @__PURE__ */ new Date(),
- ...g,
- stage: Fe == null ? void 0 : Fe.stage,
- queue: !0,
- endpoint: be,
- fn_index: Y
- }), pe.close()));
- };
- }
- });
- function A(Q) {
- const ue = f[Q.type] || [];
- ue == null || ue.forEach((me) => me(Q));
- }
- function O(Q, ce) {
- const ue = f, me = ue[Q] || [];
- return ue[Q] = me, me == null || me.push(ce), { on: O, off: H, cancel: z, destroy: ae };
- }
- function H(Q, ce) {
- const ue = f;
- let me = ue[Q] || [];
- return me = me == null ? void 0 : me.filter((fe) => fe !== ce), ue[Q] = me, { on: O, off: H, cancel: z, destroy: ae };
- }
- async function z() {
- const Q = {
- stage: "complete",
- queue: !1,
- time: /* @__PURE__ */ new Date()
- };
- g = Q, A({
- ...Q,
- type: "status",
- endpoint: be,
- fn_index: Y
- });
- let ce = {};
- we === "ws" ? (le && le.readyState === 0 ? le.addEventListener("open", () => {
- le.close();
- }) : le.close(), ce = { fn_index: Y, session_hash: F }) : (pe.close(), ce = { event_id: d });
- try {
- await t(
- `${P}//${resolve_root(
- b,
- y.path,
- !0
- )}/reset`,
- {
- headers: { "Content-Type": "application/json" },
- method: "POST",
- body: JSON.stringify(ce)
- }
- );
- } catch {
- console.warn(
- "The `/reset` endpoint could not be called. Subsequent endpoint results may be unreliable."
- );
- }
- }
- function ae() {
- for (const Q in f)
- f[Q].forEach((ce) => {
- H(Q, ce);
- });
- }
- return {
- on: O,
- off: H,
- cancel: z,
- destroy: ae
- };
- }
- async function X(I, B, ee) {
- var Y;
- const q = { "Content-Type": "application/json" };
- r && (q.Authorization = `Bearer ${r}`);
- let le, pe = y.components.find(
- (R) => R.id === I
- );
- (Y = pe == null ? void 0 : pe.props) != null && Y.root_url ? le = pe.props.root_url : le = `${P}//${resolve_root(
- b,
- y.path,
- !0
- )}/`;
- const we = await t(
- `${le}component_server/`,
- {
- method: "POST",
- body: JSON.stringify({
- data: ee,
- component_id: I,
- fn_name: B,
- session_hash: F
- }),
- headers: q
- }
- );
- if (!we.ok)
- throw new Error(
- "Could not connect to component server: " + we.statusText
- );
- return await we.json();
- }
- async function G(I) {
- if (M)
- return M;
- const B = { "Content-Type": "application/json" };
- r && (B.Authorization = `Bearer ${r}`);
- let ee;
- if (semiver(I.version || "2.0.0", "3.30") < 0 ? ee = await t(
- "https://gradio-space-api-fetcher-v2.hf.space/api",
- {
- method: "POST",
- body: JSON.stringify({
- serialize: !1,
- config: JSON.stringify(I)
- }),
- headers: B
- }
- ) : ee = await t(`${I.root}/info`, {
- headers: B
- }), !ee.ok)
- throw new Error(BROKEN_CONNECTION_MSG);
- let Y = await ee.json();
- return "api" in Y && (Y = Y.api), Y.named_endpoints["/predict"] && !Y.unnamed_endpoints[0] && (Y.unnamed_endpoints[0] = Y.named_endpoints["/predict"]), transform_api_info(Y, I, p);
- }
- });
- }
- async function l(h, _, c, o) {
- const r = await walk_and_store_blobs(
- _,
- void 0,
- [],
- !0,
- c
- );
- return Promise.all(
- r.map(async ({ path: T, blob: S, type: w }) => {
- if (S) {
- const C = (await n(h, [S], o)).files[0];
- return { path: T, file_url: C, type: w, name: S == null ? void 0 : S.name };
- }
- return { path: T, type: w };
- })
- ).then((T) => (T.forEach(({ path: S, file_url: w, type: C, name: P }) => {
- if (C === "Gallery")
- update_object(_, w, S);
- else if (w) {
- const b = new FileData({ path: w, orig_name: P });
- update_object(_, b, S);
- }
- }), _));
- }
-}
-const { post_data, upload_files, client, handle_blob } = api_factory(
- fetch,
- (...t) => new WebSocket(...t)
-);
-function transform_output(t, e, i, n) {
- return t.map((s, l) => {
- var h, _, c, o;
- return ((_ = (h = e == null ? void 0 : e.returns) == null ? void 0 : h[l]) == null ? void 0 : _.component) === "File" ? normalise_file(s, i, n) : ((o = (c = e == null ? void 0 : e.returns) == null ? void 0 : c[l]) == null ? void 0 : o.component) === "Gallery" ? s.map((r) => Array.isArray(r) ? [normalise_file(r[0], i, n), r[1]] : [normalise_file(r, i, n), null]) : typeof s == "object" && s.path ? normalise_file(s, i, n) : s;
- });
-}
-function get_type(t, e, i, n) {
- switch (t.type) {
- case "string":
- return "string";
- case "boolean":
- return "boolean";
- case "number":
- return "number";
- }
- if (i === "JSONSerializable" || i === "StringSerializable")
- return "any";
- if (i === "ListStringSerializable")
- return "string[]";
- if (e === "Image")
- return n === "parameter" ? "Blob | File | Buffer" : "string";
- if (i === "FileSerializable")
- return (t == null ? void 0 : t.type) === "array" ? n === "parameter" ? "(Blob | File | Buffer)[]" : "{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}[]" : n === "parameter" ? "Blob | File | Buffer" : "{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}";
- if (i === "GallerySerializable")
- return n === "parameter" ? "[(Blob | File | Buffer), (string | null)][]" : "[{ name: string; data: string; size?: number; is_file?: boolean; orig_name?: string}, (string | null))][]";
-}
-function get_description(t, e) {
- return e === "GallerySerializable" ? "array of [file, label] tuples" : e === "ListStringSerializable" ? "array of strings" : e === "FileSerializable" ? "array of files or single file" : t.description;
-}
-function transform_api_info(t, e, i) {
- const n = {
- named_endpoints: {},
- unnamed_endpoints: {}
- };
- for (const s in t) {
- const l = t[s];
- for (const h in l) {
- const _ = e.dependencies[h] ? h : i[h.replace("/", "")], c = l[h];
- n[s][h] = {}, n[s][h].parameters = {}, n[s][h].returns = {}, n[s][h].type = e.dependencies[_].types, n[s][h].parameters = c.parameters.map(
- ({ label: o, component: r, type: T, serializer: S }) => ({
- label: o,
- component: r,
- type: get_type(T, r, S, "parameter"),
- description: get_description(T, S)
- })
- ), n[s][h].returns = c.returns.map(
- ({ label: o, component: r, type: T, serializer: S }) => ({
- label: o,
- component: r,
- type: get_type(T, r, S, "return"),
- description: get_description(T, S)
- })
- );
- }
- }
- return n;
-}
-async function get_jwt(t, e) {
- try {
- return (await (await fetch(`https://huggingface.co/api/spaces/${t}/jwt`, {
- headers: {
- Authorization: `Bearer ${e}`
- }
- })).json()).token || !1;
- } catch (i) {
- return console.error(i), !1;
- }
-}
-function update_object(t, e, i) {
- for (; i.length > 1; )
- t = t[i.shift()];
- t[i.shift()] = e;
-}
-async function walk_and_store_blobs(t, e = void 0, i = [], n = !1, s = void 0) {
- if (Array.isArray(t)) {
- let l = [];
- return await Promise.all(
- t.map(async (h, _) => {
- var c;
- let o = i.slice();
- o.push(_);
- const r = await walk_and_store_blobs(
- t[_],
- n ? ((c = s == null ? void 0 : s.parameters[_]) == null ? void 0 : c.component) || void 0 : e,
- o,
- !1,
- s
- );
- l = l.concat(r);
- })
- ), l;
- } else {
- if (globalThis.Buffer && t instanceof globalThis.Buffer)
- return [
- {
- path: i,
- blob: e === "Image" ? !1 : new NodeBlob([t]),
- type: e
- }
- ];
- if (typeof t == "object") {
- let l = [];
- for (let h in t)
- if (t.hasOwnProperty(h)) {
- let _ = i.slice();
- _.push(h), l = l.concat(
- await walk_and_store_blobs(
- t[h],
- void 0,
- _,
- !1,
- s
- )
- );
- }
- return l;
- }
- }
- return [];
-}
-function skip_queue(t, e) {
- var i, n, s, l;
- return !(((n = (i = e == null ? void 0 : e.dependencies) == null ? void 0 : i[t]) == null ? void 0 : n.queue) === null ? e.enable_queue : (l = (s = e == null ? void 0 : e.dependencies) == null ? void 0 : s[t]) != null && l.queue) || !1;
-}
-async function resolve_config(t, e, i) {
- const n = {};
- if (i && (n.Authorization = `Bearer ${i}`), typeof window < "u" && window.gradio_config && location.origin !== "http://localhost:9876" && !window.gradio_config.dev_mode) {
- const s = window.gradio_config.root, l = window.gradio_config;
- return l.root = resolve_root(e, l.root, !1), { ...l, path: s };
- } else if (e) {
- let s = await t(`${e}/config`, {
- headers: n
- });
- if (s.status === 200) {
- const l = await s.json();
- return l.path = l.path ?? "", l.root = e, l;
- }
- throw new Error("Could not get config.");
- }
- throw new Error("No config or app endpoint found");
-}
-async function check_space_status(t, e, i) {
- let n = e === "subdomain" ? `https://huggingface.co/api/spaces/by-subdomain/${t}` : `https://huggingface.co/api/spaces/${t}`, s, l;
- try {
- if (s = await fetch(n), l = s.status, l !== 200)
- throw new Error();
- s = await s.json();
- } catch {
- i({
- status: "error",
- load_status: "error",
- message: "Could not get space status",
- detail: "NOT_FOUND"
- });
- return;
- }
- if (!s || l !== 200)
- return;
- const {
- runtime: { stage: h },
- id: _
- } = s;
- switch (h) {
- case "STOPPED":
- case "SLEEPING":
- i({
- status: "sleeping",
- load_status: "pending",
- message: "Space is asleep. Waking it up...",
- detail: h
- }), setTimeout(() => {
- check_space_status(t, e, i);
- }, 1e3);
- break;
- case "PAUSED":
- i({
- status: "paused",
- load_status: "error",
- message: "This space has been paused by the author. If you would like to try this demo, consider duplicating the space.",
- detail: h,
- discussions_enabled: await discussions_enabled(_)
- });
- break;
- case "RUNNING":
- case "RUNNING_BUILDING":
- i({
- status: "running",
- load_status: "complete",
- message: "",
- detail: h
- });
- break;
- case "BUILDING":
- i({
- status: "building",
- load_status: "pending",
- message: "Space is building...",
- detail: h
- }), setTimeout(() => {
- check_space_status(t, e, i);
- }, 1e3);
- break;
- default:
- i({
- status: "space_error",
- load_status: "error",
- message: "This space is experiencing an issue.",
- detail: h,
- discussions_enabled: await discussions_enabled(_)
- });
- break;
- }
-}
-function handle_message(t, e) {
- switch (t.msg) {
- case "send_data":
- return { type: "data" };
- case "send_hash":
- return { type: "hash" };
- case "queue_full":
- return {
- type: "update",
- status: {
- queue: !0,
- message: QUEUE_FULL_MSG,
- stage: "error",
- code: t.code,
- success: t.success
- }
- };
- case "estimation":
- return {
- type: "update",
- status: {
- queue: !0,
- stage: e || "pending",
- code: t.code,
- size: t.queue_size,
- position: t.rank,
- eta: t.rank_eta,
- success: t.success
- }
- };
- case "progress":
- return {
- type: "update",
- status: {
- queue: !0,
- stage: "pending",
- code: t.code,
- progress_data: t.progress_data,
- success: t.success
- }
- };
- case "log":
- return { type: "log", data: t };
- case "process_generating":
- return {
- type: "generating",
- status: {
- queue: !0,
- message: t.success ? null : t.output.error,
- stage: t.success ? "generating" : "error",
- code: t.code,
- progress_data: t.progress_data,
- eta: t.average_duration
- },
- data: t.success ? t.output : null
- };
- case "process_completed":
- return "error" in t.output ? {
- type: "update",
- status: {
- queue: !0,
- message: t.output.error,
- stage: "error",
- code: t.code,
- success: t.success
- }
- } : {
- type: "complete",
- status: {
- queue: !0,
- message: t.success ? void 0 : t.output.error,
- stage: t.success ? "complete" : "error",
- code: t.code,
- progress_data: t.progress_data,
- eta: t.output.average_duration
- },
- data: t.success ? t.output : null
- };
- case "process_starts":
- return {
- type: "update",
- status: {
- queue: !0,
- stage: "pending",
- code: t.code,
- size: t.rank,
- position: 0,
- success: t.success
- }
- };
- }
- return { type: "none", status: { stage: "error", queue: !0 } };
-}
-function getDefaultExportFromCjs(t) {
- return t && t.__esModule && Object.prototype.hasOwnProperty.call(t, "default") ? t.default : t;
-}
-function getAugmentedNamespace(t) {
- if (t.__esModule)
- return t;
- var e = t.default;
- if (typeof e == "function") {
- var i = function n() {
- return this instanceof n ? Reflect.construct(e, arguments, this.constructor) : e.apply(this, arguments);
- };
- i.prototype = e.prototype;
- } else
- i = {};
- return Object.defineProperty(i, "__esModule", { value: !0 }), Object.keys(t).forEach(function(n) {
- var s = Object.getOwnPropertyDescriptor(t, n);
- Object.defineProperty(i, n, s.get ? s : {
- enumerable: !0,
- get: function() {
- return t[n];
- }
- });
- }), i;
-}
-var isMergeableObject = function(e) {
- return isNonNullObject(e) && !isSpecial(e);
-};
-function isNonNullObject(t) {
- return !!t && typeof t == "object";
-}
-function isSpecial(t) {
- var e = Object.prototype.toString.call(t);
- return e === "[object RegExp]" || e === "[object Date]" || isReactElement(t);
-}
-var canUseSymbol = typeof Symbol == "function" && Symbol.for, REACT_ELEMENT_TYPE = canUseSymbol ? Symbol.for("react.element") : 60103;
-function isReactElement(t) {
- return t.$$typeof === REACT_ELEMENT_TYPE;
-}
-function emptyTarget(t) {
- return Array.isArray(t) ? [] : {};
-}
-function cloneUnlessOtherwiseSpecified(t, e) {
- return e.clone !== !1 && e.isMergeableObject(t) ? deepmerge(emptyTarget(t), t, e) : t;
-}
-function defaultArrayMerge(t, e, i) {
- return t.concat(e).map(function(n) {
- return cloneUnlessOtherwiseSpecified(n, i);
- });
-}
-function getMergeFunction(t, e) {
- if (!e.customMerge)
- return deepmerge;
- var i = e.customMerge(t);
- return typeof i == "function" ? i : deepmerge;
-}
-function getEnumerableOwnPropertySymbols(t) {
- return Object.getOwnPropertySymbols ? Object.getOwnPropertySymbols(t).filter(function(e) {
- return Object.propertyIsEnumerable.call(t, e);
- }) : [];
-}
-function getKeys(t) {
- return Object.keys(t).concat(getEnumerableOwnPropertySymbols(t));
-}
-function propertyIsOnObject(t, e) {
- try {
- return e in t;
- } catch {
- return !1;
- }
-}
-function propertyIsUnsafe(t, e) {
- return propertyIsOnObject(t, e) && !(Object.hasOwnProperty.call(t, e) && Object.propertyIsEnumerable.call(t, e));
-}
-function mergeObject(t, e, i) {
- var n = {};
- return i.isMergeableObject(t) && getKeys(t).forEach(function(s) {
- n[s] = cloneUnlessOtherwiseSpecified(t[s], i);
- }), getKeys(e).forEach(function(s) {
- propertyIsUnsafe(t, s) || (propertyIsOnObject(t, s) && i.isMergeableObject(e[s]) ? n[s] = getMergeFunction(s, i)(t[s], e[s], i) : n[s] = cloneUnlessOtherwiseSpecified(e[s], i));
- }), n;
-}
-function deepmerge(t, e, i) {
- i = i || {}, i.arrayMerge = i.arrayMerge || defaultArrayMerge, i.isMergeableObject = i.isMergeableObject || isMergeableObject, i.cloneUnlessOtherwiseSpecified = cloneUnlessOtherwiseSpecified;
- var n = Array.isArray(e), s = Array.isArray(t), l = n === s;
- return l ? n ? i.arrayMerge(t, e, i) : mergeObject(t, e, i) : cloneUnlessOtherwiseSpecified(e, i);
-}
-deepmerge.all = function(e, i) {
- if (!Array.isArray(e))
- throw new Error("first argument should be an array");
- return e.reduce(function(n, s) {
- return deepmerge(n, s, i);
- }, {});
-};
-var deepmerge_1 = deepmerge, cjs = deepmerge_1;
-const deepmerge$1 = /* @__PURE__ */ getDefaultExportFromCjs(cjs);
-var extendStatics = function(t, e) {
- return extendStatics = Object.setPrototypeOf || { __proto__: [] } instanceof Array && function(i, n) {
- i.__proto__ = n;
- } || function(i, n) {
- for (var s in n)
- Object.prototype.hasOwnProperty.call(n, s) && (i[s] = n[s]);
- }, extendStatics(t, e);
-};
-function __extends(t, e) {
- if (typeof e != "function" && e !== null)
- throw new TypeError("Class extends value " + String(e) + " is not a constructor or null");
- extendStatics(t, e);
- function i() {
- this.constructor = t;
- }
- t.prototype = e === null ? Object.create(e) : (i.prototype = e.prototype, new i());
-}
-var __assign = function() {
- return __assign = Object.assign || function(e) {
- for (var i, n = 1, s = arguments.length; n < s; n++) {
- i = arguments[n];
- for (var l in i)
- Object.prototype.hasOwnProperty.call(i, l) && (e[l] = i[l]);
- }
- return e;
- }, __assign.apply(this, arguments);
-};
-function __spreadArray(t, e, i) {
- if (i || arguments.length === 2)
- for (var n = 0, s = e.length, l; n < s; n++)
- (l || !(n in e)) && (l || (l = Array.prototype.slice.call(e, 0, n)), l[n] = e[n]);
- return t.concat(l || Array.prototype.slice.call(e));
-}
-typeof SuppressedError == "function" && SuppressedError;
-var ErrorKind;
-(function(t) {
- t[t.EXPECT_ARGUMENT_CLOSING_BRACE = 1] = "EXPECT_ARGUMENT_CLOSING_BRACE", t[t.EMPTY_ARGUMENT = 2] = "EMPTY_ARGUMENT", t[t.MALFORMED_ARGUMENT = 3] = "MALFORMED_ARGUMENT", t[t.EXPECT_ARGUMENT_TYPE = 4] = "EXPECT_ARGUMENT_TYPE", t[t.INVALID_ARGUMENT_TYPE = 5] = "INVALID_ARGUMENT_TYPE", t[t.EXPECT_ARGUMENT_STYLE = 6] = "EXPECT_ARGUMENT_STYLE", t[t.INVALID_NUMBER_SKELETON = 7] = "INVALID_NUMBER_SKELETON", t[t.INVALID_DATE_TIME_SKELETON = 8] = "INVALID_DATE_TIME_SKELETON", t[t.EXPECT_NUMBER_SKELETON = 9] = "EXPECT_NUMBER_SKELETON", t[t.EXPECT_DATE_TIME_SKELETON = 10] = "EXPECT_DATE_TIME_SKELETON", t[t.UNCLOSED_QUOTE_IN_ARGUMENT_STYLE = 11] = "UNCLOSED_QUOTE_IN_ARGUMENT_STYLE", t[t.EXPECT_SELECT_ARGUMENT_OPTIONS = 12] = "EXPECT_SELECT_ARGUMENT_OPTIONS", t[t.EXPECT_PLURAL_ARGUMENT_OFFSET_VALUE = 13] = "EXPECT_PLURAL_ARGUMENT_OFFSET_VALUE", t[t.INVALID_PLURAL_ARGUMENT_OFFSET_VALUE = 14] = "INVALID_PLURAL_ARGUMENT_OFFSET_VALUE", t[t.EXPECT_SELECT_ARGUMENT_SELECTOR = 15] = "EXPECT_SELECT_ARGUMENT_SELECTOR", t[t.EXPECT_PLURAL_ARGUMENT_SELECTOR = 16] = "EXPECT_PLURAL_ARGUMENT_SELECTOR", t[t.EXPECT_SELECT_ARGUMENT_SELECTOR_FRAGMENT = 17] = "EXPECT_SELECT_ARGUMENT_SELECTOR_FRAGMENT", t[t.EXPECT_PLURAL_ARGUMENT_SELECTOR_FRAGMENT = 18] = "EXPECT_PLURAL_ARGUMENT_SELECTOR_FRAGMENT", t[t.INVALID_PLURAL_ARGUMENT_SELECTOR = 19] = "INVALID_PLURAL_ARGUMENT_SELECTOR", t[t.DUPLICATE_PLURAL_ARGUMENT_SELECTOR = 20] = "DUPLICATE_PLURAL_ARGUMENT_SELECTOR", t[t.DUPLICATE_SELECT_ARGUMENT_SELECTOR = 21] = "DUPLICATE_SELECT_ARGUMENT_SELECTOR", t[t.MISSING_OTHER_CLAUSE = 22] = "MISSING_OTHER_CLAUSE", t[t.INVALID_TAG = 23] = "INVALID_TAG", t[t.INVALID_TAG_NAME = 25] = "INVALID_TAG_NAME", t[t.UNMATCHED_CLOSING_TAG = 26] = "UNMATCHED_CLOSING_TAG", t[t.UNCLOSED_TAG = 27] = "UNCLOSED_TAG";
-})(ErrorKind || (ErrorKind = {}));
-var TYPE;
-(function(t) {
- t[t.literal = 0] = "literal", t[t.argument = 1] = "argument", t[t.number = 2] = "number", t[t.date = 3] = "date", t[t.time = 4] = "time", t[t.select = 5] = "select", t[t.plural = 6] = "plural", t[t.pound = 7] = "pound", t[t.tag = 8] = "tag";
-})(TYPE || (TYPE = {}));
-var SKELETON_TYPE;
-(function(t) {
- t[t.number = 0] = "number", t[t.dateTime = 1] = "dateTime";
-})(SKELETON_TYPE || (SKELETON_TYPE = {}));
-function isLiteralElement(t) {
- return t.type === TYPE.literal;
-}
-function isArgumentElement(t) {
- return t.type === TYPE.argument;
-}
-function isNumberElement(t) {
- return t.type === TYPE.number;
-}
-function isDateElement(t) {
- return t.type === TYPE.date;
-}
-function isTimeElement(t) {
- return t.type === TYPE.time;
-}
-function isSelectElement(t) {
- return t.type === TYPE.select;
-}
-function isPluralElement(t) {
- return t.type === TYPE.plural;
-}
-function isPoundElement(t) {
- return t.type === TYPE.pound;
-}
-function isTagElement(t) {
- return t.type === TYPE.tag;
-}
-function isNumberSkeleton(t) {
- return !!(t && typeof t == "object" && t.type === SKELETON_TYPE.number);
-}
-function isDateTimeSkeleton(t) {
- return !!(t && typeof t == "object" && t.type === SKELETON_TYPE.dateTime);
-}
-var SPACE_SEPARATOR_REGEX = /[ \xA0\u1680\u2000-\u200A\u202F\u205F\u3000]/, DATE_TIME_REGEX = /(?:[Eec]{1,6}|G{1,5}|[Qq]{1,5}|(?:[yYur]+|U{1,5})|[ML]{1,5}|d{1,2}|D{1,3}|F{1}|[abB]{1,5}|[hkHK]{1,2}|w{1,2}|W{1}|m{1,2}|s{1,2}|[zZOvVxX]{1,4})(?=([^']*'[^']*')*[^']*$)/g;
-function parseDateTimeSkeleton(t) {
- var e = {};
- return t.replace(DATE_TIME_REGEX, function(i) {
- var n = i.length;
- switch (i[0]) {
- case "G":
- e.era = n === 4 ? "long" : n === 5 ? "narrow" : "short";
- break;
- case "y":
- e.year = n === 2 ? "2-digit" : "numeric";
- break;
- case "Y":
- case "u":
- case "U":
- case "r":
- throw new RangeError("`Y/u/U/r` (year) patterns are not supported, use `y` instead");
- case "q":
- case "Q":
- throw new RangeError("`q/Q` (quarter) patterns are not supported");
- case "M":
- case "L":
- e.month = ["numeric", "2-digit", "short", "long", "narrow"][n - 1];
- break;
- case "w":
- case "W":
- throw new RangeError("`w/W` (week) patterns are not supported");
- case "d":
- e.day = ["numeric", "2-digit"][n - 1];
- break;
- case "D":
- case "F":
- case "g":
- throw new RangeError("`D/F/g` (day) patterns are not supported, use `d` instead");
- case "E":
- e.weekday = n === 4 ? "short" : n === 5 ? "narrow" : "short";
- break;
- case "e":
- if (n < 4)
- throw new RangeError("`e..eee` (weekday) patterns are not supported");
- e.weekday = ["short", "long", "narrow", "short"][n - 4];
- break;
- case "c":
- if (n < 4)
- throw new RangeError("`c..ccc` (weekday) patterns are not supported");
- e.weekday = ["short", "long", "narrow", "short"][n - 4];
- break;
- case "a":
- e.hour12 = !0;
- break;
- case "b":
- case "B":
- throw new RangeError("`b/B` (period) patterns are not supported, use `a` instead");
- case "h":
- e.hourCycle = "h12", e.hour = ["numeric", "2-digit"][n - 1];
- break;
- case "H":
- e.hourCycle = "h23", e.hour = ["numeric", "2-digit"][n - 1];
- break;
- case "K":
- e.hourCycle = "h11", e.hour = ["numeric", "2-digit"][n - 1];
- break;
- case "k":
- e.hourCycle = "h24", e.hour = ["numeric", "2-digit"][n - 1];
- break;
- case "j":
- case "J":
- case "C":
- throw new RangeError("`j/J/C` (hour) patterns are not supported, use `h/H/K/k` instead");
- case "m":
- e.minute = ["numeric", "2-digit"][n - 1];
- break;
- case "s":
- e.second = ["numeric", "2-digit"][n - 1];
- break;
- case "S":
- case "A":
- throw new RangeError("`S/A` (second) patterns are not supported, use `s` instead");
- case "z":
- e.timeZoneName = n < 4 ? "short" : "long";
- break;
- case "Z":
- case "O":
- case "v":
- case "V":
- case "X":
- case "x":
- throw new RangeError("`Z/O/v/V/X/x` (timeZone) patterns are not supported, use `z` instead");
- }
- return "";
- }), e;
-}
-var WHITE_SPACE_REGEX = /[\t-\r \x85\u200E\u200F\u2028\u2029]/i;
-function parseNumberSkeletonFromString(t) {
- if (t.length === 0)
- throw new Error("Number skeleton cannot be empty");
- for (var e = t.split(WHITE_SPACE_REGEX).filter(function(S) {
- return S.length > 0;
- }), i = [], n = 0, s = e; n < s.length; n++) {
- var l = s[n], h = l.split("/");
- if (h.length === 0)
- throw new Error("Invalid number skeleton");
- for (var _ = h[0], c = h.slice(1), o = 0, r = c; o < r.length; o++) {
- var T = r[o];
- if (T.length === 0)
- throw new Error("Invalid number skeleton");
- }
- i.push({ stem: _, options: c });
- }
- return i;
-}
-function icuUnitToEcma(t) {
- return t.replace(/^(.*?)-/, "");
-}
-var FRACTION_PRECISION_REGEX = /^\.(?:(0+)(\*)?|(#+)|(0+)(#+))$/g, SIGNIFICANT_PRECISION_REGEX = /^(@+)?(\+|#+)?[rs]?$/g, INTEGER_WIDTH_REGEX = /(\*)(0+)|(#+)(0+)|(0+)/g, CONCISE_INTEGER_WIDTH_REGEX = /^(0+)$/;
-function parseSignificantPrecision(t) {
- var e = {};
- return t[t.length - 1] === "r" ? e.roundingPriority = "morePrecision" : t[t.length - 1] === "s" && (e.roundingPriority = "lessPrecision"), t.replace(SIGNIFICANT_PRECISION_REGEX, function(i, n, s) {
- return typeof s != "string" ? (e.minimumSignificantDigits = n.length, e.maximumSignificantDigits = n.length) : s === "+" ? e.minimumSignificantDigits = n.length : n[0] === "#" ? e.maximumSignificantDigits = n.length : (e.minimumSignificantDigits = n.length, e.maximumSignificantDigits = n.length + (typeof s == "string" ? s.length : 0)), "";
- }), e;
-}
-function parseSign(t) {
- switch (t) {
- case "sign-auto":
- return {
- signDisplay: "auto"
- };
- case "sign-accounting":
- case "()":
- return {
- currencySign: "accounting"
- };
- case "sign-always":
- case "+!":
- return {
- signDisplay: "always"
- };
- case "sign-accounting-always":
- case "()!":
- return {
- signDisplay: "always",
- currencySign: "accounting"
- };
- case "sign-except-zero":
- case "+?":
- return {
- signDisplay: "exceptZero"
- };
- case "sign-accounting-except-zero":
- case "()?":
- return {
- signDisplay: "exceptZero",
- currencySign: "accounting"
- };
- case "sign-never":
- case "+_":
- return {
- signDisplay: "never"
- };
- }
-}
-function parseConciseScientificAndEngineeringStem(t) {
- var e;
- if (t[0] === "E" && t[1] === "E" ? (e = {
- notation: "engineering"
- }, t = t.slice(2)) : t[0] === "E" && (e = {
- notation: "scientific"
- }, t = t.slice(1)), e) {
- var i = t.slice(0, 2);
- if (i === "+!" ? (e.signDisplay = "always", t = t.slice(2)) : i === "+?" && (e.signDisplay = "exceptZero", t = t.slice(2)), !CONCISE_INTEGER_WIDTH_REGEX.test(t))
- throw new Error("Malformed concise eng/scientific notation");
- e.minimumIntegerDigits = t.length;
- }
- return e;
-}
-function parseNotationOptions(t) {
- var e = {}, i = parseSign(t);
- return i || e;
-}
-function parseNumberSkeleton(t) {
- for (var e = {}, i = 0, n = t; i < n.length; i++) {
- var s = n[i];
- switch (s.stem) {
- case "percent":
- case "%":
- e.style = "percent";
- continue;
- case "%x100":
- e.style = "percent", e.scale = 100;
- continue;
- case "currency":
- e.style = "currency", e.currency = s.options[0];
- continue;
- case "group-off":
- case ",_":
- e.useGrouping = !1;
- continue;
- case "precision-integer":
- case ".":
- e.maximumFractionDigits = 0;
- continue;
- case "measure-unit":
- case "unit":
- e.style = "unit", e.unit = icuUnitToEcma(s.options[0]);
- continue;
- case "compact-short":
- case "K":
- e.notation = "compact", e.compactDisplay = "short";
- continue;
- case "compact-long":
- case "KK":
- e.notation = "compact", e.compactDisplay = "long";
- continue;
- case "scientific":
- e = __assign(__assign(__assign({}, e), { notation: "scientific" }), s.options.reduce(function(c, o) {
- return __assign(__assign({}, c), parseNotationOptions(o));
- }, {}));
- continue;
- case "engineering":
- e = __assign(__assign(__assign({}, e), { notation: "engineering" }), s.options.reduce(function(c, o) {
- return __assign(__assign({}, c), parseNotationOptions(o));
- }, {}));
- continue;
- case "notation-simple":
- e.notation = "standard";
- continue;
- case "unit-width-narrow":
- e.currencyDisplay = "narrowSymbol", e.unitDisplay = "narrow";
- continue;
- case "unit-width-short":
- e.currencyDisplay = "code", e.unitDisplay = "short";
- continue;
- case "unit-width-full-name":
- e.currencyDisplay = "name", e.unitDisplay = "long";
- continue;
- case "unit-width-iso-code":
- e.currencyDisplay = "symbol";
- continue;
- case "scale":
- e.scale = parseFloat(s.options[0]);
- continue;
- case "integer-width":
- if (s.options.length > 1)
- throw new RangeError("integer-width stems only accept a single optional option");
- s.options[0].replace(INTEGER_WIDTH_REGEX, function(c, o, r, T, S, w) {
- if (o)
- e.minimumIntegerDigits = r.length;
- else {
- if (T && S)
- throw new Error("We currently do not support maximum integer digits");
- if (w)
- throw new Error("We currently do not support exact integer digits");
- }
- return "";
- });
- continue;
- }
- if (CONCISE_INTEGER_WIDTH_REGEX.test(s.stem)) {
- e.minimumIntegerDigits = s.stem.length;
- continue;
- }
- if (FRACTION_PRECISION_REGEX.test(s.stem)) {
- if (s.options.length > 1)
- throw new RangeError("Fraction-precision stems only accept a single optional option");
- s.stem.replace(FRACTION_PRECISION_REGEX, function(c, o, r, T, S, w) {
- return r === "*" ? e.minimumFractionDigits = o.length : T && T[0] === "#" ? e.maximumFractionDigits = T.length : S && w ? (e.minimumFractionDigits = S.length, e.maximumFractionDigits = S.length + w.length) : (e.minimumFractionDigits = o.length, e.maximumFractionDigits = o.length), "";
- });
- var l = s.options[0];
- l === "w" ? e = __assign(__assign({}, e), { trailingZeroDisplay: "stripIfInteger" }) : l && (e = __assign(__assign({}, e), parseSignificantPrecision(l)));
- continue;
- }
- if (SIGNIFICANT_PRECISION_REGEX.test(s.stem)) {
- e = __assign(__assign({}, e), parseSignificantPrecision(s.stem));
- continue;
- }
- var h = parseSign(s.stem);
- h && (e = __assign(__assign({}, e), h));
- var _ = parseConciseScientificAndEngineeringStem(s.stem);
- _ && (e = __assign(__assign({}, e), _));
- }
- return e;
-}
-var timeData = {
- AX: [
- "H"
- ],
- BQ: [
- "H"
- ],
- CP: [
- "H"
- ],
- CZ: [
- "H"
- ],
- DK: [
- "H"
- ],
- FI: [
- "H"
- ],
- ID: [
- "H"
- ],
- IS: [
- "H"
- ],
- ML: [
- "H"
- ],
- NE: [
- "H"
- ],
- RU: [
- "H"
- ],
- SE: [
- "H"
- ],
- SJ: [
- "H"
- ],
- SK: [
- "H"
- ],
- AS: [
- "h",
- "H"
- ],
- BT: [
- "h",
- "H"
- ],
- DJ: [
- "h",
- "H"
- ],
- ER: [
- "h",
- "H"
- ],
- GH: [
- "h",
- "H"
- ],
- IN: [
- "h",
- "H"
- ],
- LS: [
- "h",
- "H"
- ],
- PG: [
- "h",
- "H"
- ],
- PW: [
- "h",
- "H"
- ],
- SO: [
- "h",
- "H"
- ],
- TO: [
- "h",
- "H"
- ],
- VU: [
- "h",
- "H"
- ],
- WS: [
- "h",
- "H"
- ],
- "001": [
- "H",
- "h"
- ],
- AL: [
- "h",
- "H",
- "hB"
- ],
- TD: [
- "h",
- "H",
- "hB"
- ],
- "ca-ES": [
- "H",
- "h",
- "hB"
- ],
- CF: [
- "H",
- "h",
- "hB"
- ],
- CM: [
- "H",
- "h",
- "hB"
- ],
- "fr-CA": [
- "H",
- "h",
- "hB"
- ],
- "gl-ES": [
- "H",
- "h",
- "hB"
- ],
- "it-CH": [
- "H",
- "h",
- "hB"
- ],
- "it-IT": [
- "H",
- "h",
- "hB"
- ],
- LU: [
- "H",
- "h",
- "hB"
- ],
- NP: [
- "H",
- "h",
- "hB"
- ],
- PF: [
- "H",
- "h",
- "hB"
- ],
- SC: [
- "H",
- "h",
- "hB"
- ],
- SM: [
- "H",
- "h",
- "hB"
- ],
- SN: [
- "H",
- "h",
- "hB"
- ],
- TF: [
- "H",
- "h",
- "hB"
- ],
- VA: [
- "H",
- "h",
- "hB"
- ],
- CY: [
- "h",
- "H",
- "hb",
- "hB"
- ],
- GR: [
- "h",
- "H",
- "hb",
- "hB"
- ],
- CO: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- DO: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- KP: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- KR: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- NA: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- PA: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- PR: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- VE: [
- "h",
- "H",
- "hB",
- "hb"
- ],
- AC: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- AI: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- BW: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- BZ: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- CC: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- CK: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- CX: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- DG: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- FK: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- GB: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- GG: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- GI: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- IE: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- IM: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- IO: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- JE: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- LT: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- MK: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- MN: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- MS: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- NF: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- NG: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- NR: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- NU: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- PN: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- SH: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- SX: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- TA: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- ZA: [
- "H",
- "h",
- "hb",
- "hB"
- ],
- "af-ZA": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- AR: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- CL: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- CR: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- CU: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- EA: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-BO": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-BR": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-EC": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-ES": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-GQ": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- "es-PE": [
- "H",
- "h",
- "hB",
- "hb"
- ],
- GT: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- HN: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- IC: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- KG: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- KM: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- LK: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- MA: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- MX: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- NI: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- PY: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- SV: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- UY: [
- "H",
- "h",
- "hB",
- "hb"
- ],
- JP: [
- "H",
- "h",
- "K"
- ],
- AD: [
- "H",
- "hB"
- ],
- AM: [
- "H",
- "hB"
- ],
- AO: [
- "H",
- "hB"
- ],
- AT: [
- "H",
- "hB"
- ],
- AW: [
- "H",
- "hB"
- ],
- BE: [
- "H",
- "hB"
- ],
- BF: [
- "H",
- "hB"
- ],
- BJ: [
- "H",
- "hB"
- ],
- BL: [
- "H",
- "hB"
- ],
- BR: [
- "H",
- "hB"
- ],
- CG: [
- "H",
- "hB"
- ],
- CI: [
- "H",
- "hB"
- ],
- CV: [
- "H",
- "hB"
- ],
- DE: [
- "H",
- "hB"
- ],
- EE: [
- "H",
- "hB"
- ],
- FR: [
- "H",
- "hB"
- ],
- GA: [
- "H",
- "hB"
- ],
- GF: [
- "H",
- "hB"
- ],
- GN: [
- "H",
- "hB"
- ],
- GP: [
- "H",
- "hB"
- ],
- GW: [
- "H",
- "hB"
- ],
- HR: [
- "H",
- "hB"
- ],
- IL: [
- "H",
- "hB"
- ],
- IT: [
- "H",
- "hB"
- ],
- KZ: [
- "H",
- "hB"
- ],
- MC: [
- "H",
- "hB"
- ],
- MD: [
- "H",
- "hB"
- ],
- MF: [
- "H",
- "hB"
- ],
- MQ: [
- "H",
- "hB"
- ],
- MZ: [
- "H",
- "hB"
- ],
- NC: [
- "H",
- "hB"
- ],
- NL: [
- "H",
- "hB"
- ],
- PM: [
- "H",
- "hB"
- ],
- PT: [
- "H",
- "hB"
- ],
- RE: [
- "H",
- "hB"
- ],
- RO: [
- "H",
- "hB"
- ],
- SI: [
- "H",
- "hB"
- ],
- SR: [
- "H",
- "hB"
- ],
- ST: [
- "H",
- "hB"
- ],
- TG: [
- "H",
- "hB"
- ],
- TR: [
- "H",
- "hB"
- ],
- WF: [
- "H",
- "hB"
- ],
- YT: [
- "H",
- "hB"
- ],
- BD: [
- "h",
- "hB",
- "H"
- ],
- PK: [
- "h",
- "hB",
- "H"
- ],
- AZ: [
- "H",
- "hB",
- "h"
- ],
- BA: [
- "H",
- "hB",
- "h"
- ],
- BG: [
- "H",
- "hB",
- "h"
- ],
- CH: [
- "H",
- "hB",
- "h"
- ],
- GE: [
- "H",
- "hB",
- "h"
- ],
- LI: [
- "H",
- "hB",
- "h"
- ],
- ME: [
- "H",
- "hB",
- "h"
- ],
- RS: [
- "H",
- "hB",
- "h"
- ],
- UA: [
- "H",
- "hB",
- "h"
- ],
- UZ: [
- "H",
- "hB",
- "h"
- ],
- XK: [
- "H",
- "hB",
- "h"
- ],
- AG: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- AU: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- BB: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- BM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- BS: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- CA: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- DM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- "en-001": [
- "h",
- "hb",
- "H",
- "hB"
- ],
- FJ: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- FM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- GD: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- GM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- GU: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- GY: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- JM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- KI: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- KN: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- KY: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- LC: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- LR: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- MH: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- MP: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- MW: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- NZ: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- SB: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- SG: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- SL: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- SS: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- SZ: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- TC: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- TT: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- UM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- US: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- VC: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- VG: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- VI: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- ZM: [
- "h",
- "hb",
- "H",
- "hB"
- ],
- BO: [
- "H",
- "hB",
- "h",
- "hb"
- ],
- EC: [
- "H",
- "hB",
- "h",
- "hb"
- ],
- ES: [
- "H",
- "hB",
- "h",
- "hb"
- ],
- GQ: [
- "H",
- "hB",
- "h",
- "hb"
- ],
- PE: [
- "H",
- "hB",
- "h",
- "hb"
- ],
- AE: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- "ar-001": [
- "h",
- "hB",
- "hb",
- "H"
- ],
- BH: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- DZ: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- EG: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- EH: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- HK: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- IQ: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- JO: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- KW: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- LB: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- LY: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- MO: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- MR: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- OM: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- PH: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- PS: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- QA: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- SA: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- SD: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- SY: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- TN: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- YE: [
- "h",
- "hB",
- "hb",
- "H"
- ],
- AF: [
- "H",
- "hb",
- "hB",
- "h"
- ],
- LA: [
- "H",
- "hb",
- "hB",
- "h"
- ],
- CN: [
- "H",
- "hB",
- "hb",
- "h"
- ],
- LV: [
- "H",
- "hB",
- "hb",
- "h"
- ],
- TL: [
- "H",
- "hB",
- "hb",
- "h"
- ],
- "zu-ZA": [
- "H",
- "hB",
- "hb",
- "h"
- ],
- CD: [
- "hB",
- "H"
- ],
- IR: [
- "hB",
- "H"
- ],
- "hi-IN": [
- "hB",
- "h",
- "H"
- ],
- "kn-IN": [
- "hB",
- "h",
- "H"
- ],
- "ml-IN": [
- "hB",
- "h",
- "H"
- ],
- "te-IN": [
- "hB",
- "h",
- "H"
- ],
- KH: [
- "hB",
- "h",
- "H",
- "hb"
- ],
- "ta-IN": [
- "hB",
- "h",
- "hb",
- "H"
- ],
- BN: [
- "hb",
- "hB",
- "h",
- "H"
- ],
- MY: [
- "hb",
- "hB",
- "h",
- "H"
- ],
- ET: [
- "hB",
- "hb",
- "h",
- "H"
- ],
- "gu-IN": [
- "hB",
- "hb",
- "h",
- "H"
- ],
- "mr-IN": [
- "hB",
- "hb",
- "h",
- "H"
- ],
- "pa-IN": [
- "hB",
- "hb",
- "h",
- "H"
- ],
- TW: [
- "hB",
- "hb",
- "h",
- "H"
- ],
- KE: [
- "hB",
- "hb",
- "H",
- "h"
- ],
- MM: [
- "hB",
- "hb",
- "H",
- "h"
- ],
- TZ: [
- "hB",
- "hb",
- "H",
- "h"
- ],
- UG: [
- "hB",
- "hb",
- "H",
- "h"
- ]
-};
-function getBestPattern(t, e) {
- for (var i = "", n = 0; n < t.length; n++) {
- var s = t.charAt(n);
- if (s === "j") {
- for (var l = 0; n + 1 < t.length && t.charAt(n + 1) === s; )
- l++, n++;
- var h = 1 + (l & 1), _ = l < 2 ? 1 : 3 + (l >> 1), c = "a", o = getDefaultHourSymbolFromLocale(e);
- for ((o == "H" || o == "k") && (_ = 0); _-- > 0; )
- i += c;
- for (; h-- > 0; )
- i = o + i;
- } else
- s === "J" ? i += "H" : i += s;
- }
- return i;
-}
-function getDefaultHourSymbolFromLocale(t) {
- var e = t.hourCycle;
- if (e === void 0 && // @ts-ignore hourCycle(s) is not identified yet
- t.hourCycles && // @ts-ignore
- t.hourCycles.length && (e = t.hourCycles[0]), e)
- switch (e) {
- case "h24":
- return "k";
- case "h23":
- return "H";
- case "h12":
- return "h";
- case "h11":
- return "K";
- default:
- throw new Error("Invalid hourCycle");
- }
- var i = t.language, n;
- i !== "root" && (n = t.maximize().region);
- var s = timeData[n || ""] || timeData[i || ""] || timeData["".concat(i, "-001")] || timeData["001"];
- return s[0];
-}
-var _a, SPACE_SEPARATOR_START_REGEX = new RegExp("^".concat(SPACE_SEPARATOR_REGEX.source, "*")), SPACE_SEPARATOR_END_REGEX = new RegExp("".concat(SPACE_SEPARATOR_REGEX.source, "*$"));
-function createLocation(t, e) {
- return { start: t, end: e };
-}
-var hasNativeStartsWith = !!String.prototype.startsWith, hasNativeFromCodePoint = !!String.fromCodePoint, hasNativeFromEntries = !!Object.fromEntries, hasNativeCodePointAt = !!String.prototype.codePointAt, hasTrimStart = !!String.prototype.trimStart, hasTrimEnd = !!String.prototype.trimEnd, hasNativeIsSafeInteger = !!Number.isSafeInteger, isSafeInteger = hasNativeIsSafeInteger ? Number.isSafeInteger : function(t) {
- return typeof t == "number" && isFinite(t) && Math.floor(t) === t && Math.abs(t) <= 9007199254740991;
-}, REGEX_SUPPORTS_U_AND_Y = !0;
-try {
- var re = RE("([^\\p{White_Space}\\p{Pattern_Syntax}]*)", "yu");
- REGEX_SUPPORTS_U_AND_Y = ((_a = re.exec("a")) === null || _a === void 0 ? void 0 : _a[0]) === "a";
-} catch {
- REGEX_SUPPORTS_U_AND_Y = !1;
-}
-var startsWith = hasNativeStartsWith ? (
- // Native
- function(e, i, n) {
- return e.startsWith(i, n);
- }
-) : (
- // For IE11
- function(e, i, n) {
- return e.slice(n, n + i.length) === i;
- }
-), fromCodePoint = hasNativeFromCodePoint ? String.fromCodePoint : (
- // IE11
- function() {
- for (var e = [], i = 0; i < arguments.length; i++)
- e[i] = arguments[i];
- for (var n = "", s = e.length, l = 0, h; s > l; ) {
- if (h = e[l++], h > 1114111)
- throw RangeError(h + " is not a valid code point");
- n += h < 65536 ? String.fromCharCode(h) : String.fromCharCode(((h -= 65536) >> 10) + 55296, h % 1024 + 56320);
- }
- return n;
- }
-), fromEntries = (
- // native
- hasNativeFromEntries ? Object.fromEntries : (
- // Ponyfill
- function(e) {
- for (var i = {}, n = 0, s = e; n < s.length; n++) {
- var l = s[n], h = l[0], _ = l[1];
- i[h] = _;
- }
- return i;
- }
- )
-), codePointAt = hasNativeCodePointAt ? (
- // Native
- function(e, i) {
- return e.codePointAt(i);
- }
-) : (
- // IE 11
- function(e, i) {
- var n = e.length;
- if (!(i < 0 || i >= n)) {
- var s = e.charCodeAt(i), l;
- return s < 55296 || s > 56319 || i + 1 === n || (l = e.charCodeAt(i + 1)) < 56320 || l > 57343 ? s : (s - 55296 << 10) + (l - 56320) + 65536;
- }
- }
-), trimStart = hasTrimStart ? (
- // Native
- function(e) {
- return e.trimStart();
- }
-) : (
- // Ponyfill
- function(e) {
- return e.replace(SPACE_SEPARATOR_START_REGEX, "");
- }
-), trimEnd = hasTrimEnd ? (
- // Native
- function(e) {
- return e.trimEnd();
- }
-) : (
- // Ponyfill
- function(e) {
- return e.replace(SPACE_SEPARATOR_END_REGEX, "");
- }
-);
-function RE(t, e) {
- return new RegExp(t, e);
-}
-var matchIdentifierAtIndex;
-if (REGEX_SUPPORTS_U_AND_Y) {
- var IDENTIFIER_PREFIX_RE_1 = RE("([^\\p{White_Space}\\p{Pattern_Syntax}]*)", "yu");
- matchIdentifierAtIndex = function(e, i) {
- var n;
- IDENTIFIER_PREFIX_RE_1.lastIndex = i;
- var s = IDENTIFIER_PREFIX_RE_1.exec(e);
- return (n = s[1]) !== null && n !== void 0 ? n : "";
- };
-} else
- matchIdentifierAtIndex = function(e, i) {
- for (var n = []; ; ) {
- var s = codePointAt(e, i);
- if (s === void 0 || _isWhiteSpace(s) || _isPatternSyntax(s))
- break;
- n.push(s), i += s >= 65536 ? 2 : 1;
- }
- return fromCodePoint.apply(void 0, n);
- };
-var Parser = (
- /** @class */
- function() {
- function t(e, i) {
- i === void 0 && (i = {}), this.message = e, this.position = { offset: 0, line: 1, column: 1 }, this.ignoreTag = !!i.ignoreTag, this.locale = i.locale, this.requiresOtherClause = !!i.requiresOtherClause, this.shouldParseSkeletons = !!i.shouldParseSkeletons;
- }
- return t.prototype.parse = function() {
- if (this.offset() !== 0)
- throw Error("parser can only be used once");
- return this.parseMessage(0, "", !1);
- }, t.prototype.parseMessage = function(e, i, n) {
- for (var s = []; !this.isEOF(); ) {
- var l = this.char();
- if (l === 123) {
- var h = this.parseArgument(e, n);
- if (h.err)
- return h;
- s.push(h.val);
- } else {
- if (l === 125 && e > 0)
- break;
- if (l === 35 && (i === "plural" || i === "selectordinal")) {
- var _ = this.clonePosition();
- this.bump(), s.push({
- type: TYPE.pound,
- location: createLocation(_, this.clonePosition())
- });
- } else if (l === 60 && !this.ignoreTag && this.peek() === 47) {
- if (n)
- break;
- return this.error(ErrorKind.UNMATCHED_CLOSING_TAG, createLocation(this.clonePosition(), this.clonePosition()));
- } else if (l === 60 && !this.ignoreTag && _isAlpha(this.peek() || 0)) {
- var h = this.parseTag(e, i);
- if (h.err)
- return h;
- s.push(h.val);
- } else {
- var h = this.parseLiteral(e, i);
- if (h.err)
- return h;
- s.push(h.val);
- }
- }
- }
- return { val: s, err: null };
- }, t.prototype.parseTag = function(e, i) {
- var n = this.clonePosition();
- this.bump();
- var s = this.parseTagName();
- if (this.bumpSpace(), this.bumpIf("/>"))
- return {
- val: {
- type: TYPE.literal,
- value: "<".concat(s, "/>"),
- location: createLocation(n, this.clonePosition())
- },
- err: null
- };
- if (this.bumpIf(">")) {
- var l = this.parseMessage(e + 1, i, !0);
- if (l.err)
- return l;
- var h = l.val, _ = this.clonePosition();
-        if (this.bumpIf("</")) {
- if (this.isEOF() || !_isAlpha(this.char()))
- return this.error(ErrorKind.INVALID_TAG, createLocation(_, this.clonePosition()));
- var c = this.clonePosition(), o = this.parseTagName();
- return s !== o ? this.error(ErrorKind.UNMATCHED_CLOSING_TAG, createLocation(c, this.clonePosition())) : (this.bumpSpace(), this.bumpIf(">") ? {
- val: {
- type: TYPE.tag,
- value: s,
- children: h,
- location: createLocation(n, this.clonePosition())
- },
- err: null
- } : this.error(ErrorKind.INVALID_TAG, createLocation(_, this.clonePosition())));
- } else
- return this.error(ErrorKind.UNCLOSED_TAG, createLocation(n, this.clonePosition()));
- } else
- return this.error(ErrorKind.INVALID_TAG, createLocation(n, this.clonePosition()));
- }, t.prototype.parseTagName = function() {
- var e = this.offset();
- for (this.bump(); !this.isEOF() && _isPotentialElementNameChar(this.char()); )
- this.bump();
- return this.message.slice(e, this.offset());
- }, t.prototype.parseLiteral = function(e, i) {
- for (var n = this.clonePosition(), s = ""; ; ) {
- var l = this.tryParseQuote(i);
- if (l) {
- s += l;
- continue;
- }
- var h = this.tryParseUnquoted(e, i);
- if (h) {
- s += h;
- continue;
- }
- var _ = this.tryParseLeftAngleBracket();
- if (_) {
- s += _;
- continue;
- }
- break;
- }
- var c = createLocation(n, this.clonePosition());
- return {
- val: { type: TYPE.literal, value: s, location: c },
- err: null
- };
- }, t.prototype.tryParseLeftAngleBracket = function() {
- return !this.isEOF() && this.char() === 60 && (this.ignoreTag || // If at the opening tag or closing tag position, bail.
- !_isAlphaOrSlash(this.peek() || 0)) ? (this.bump(), "<") : null;
- }, t.prototype.tryParseQuote = function(e) {
- if (this.isEOF() || this.char() !== 39)
- return null;
- switch (this.peek()) {
- case 39:
- return this.bump(), this.bump(), "'";
- case 123:
- case 60:
- case 62:
- case 125:
- break;
- case 35:
- if (e === "plural" || e === "selectordinal")
- break;
- return null;
- default:
- return null;
- }
- this.bump();
- var i = [this.char()];
- for (this.bump(); !this.isEOF(); ) {
- var n = this.char();
- if (n === 39)
- if (this.peek() === 39)
- i.push(39), this.bump();
- else {
- this.bump();
- break;
- }
- else
- i.push(n);
- this.bump();
- }
- return fromCodePoint.apply(void 0, i);
- }, t.prototype.tryParseUnquoted = function(e, i) {
- if (this.isEOF())
- return null;
- var n = this.char();
- return n === 60 || n === 123 || n === 35 && (i === "plural" || i === "selectordinal") || n === 125 && e > 0 ? null : (this.bump(), fromCodePoint(n));
- }, t.prototype.parseArgument = function(e, i) {
- var n = this.clonePosition();
- if (this.bump(), this.bumpSpace(), this.isEOF())
- return this.error(ErrorKind.EXPECT_ARGUMENT_CLOSING_BRACE, createLocation(n, this.clonePosition()));
- if (this.char() === 125)
- return this.bump(), this.error(ErrorKind.EMPTY_ARGUMENT, createLocation(n, this.clonePosition()));
- var s = this.parseIdentifierIfPossible().value;
- if (!s)
- return this.error(ErrorKind.MALFORMED_ARGUMENT, createLocation(n, this.clonePosition()));
- if (this.bumpSpace(), this.isEOF())
- return this.error(ErrorKind.EXPECT_ARGUMENT_CLOSING_BRACE, createLocation(n, this.clonePosition()));
- switch (this.char()) {
- case 125:
- return this.bump(), {
- val: {
- type: TYPE.argument,
- // value does not include the opening and closing braces.
- value: s,
- location: createLocation(n, this.clonePosition())
- },
- err: null
- };
- case 44:
- return this.bump(), this.bumpSpace(), this.isEOF() ? this.error(ErrorKind.EXPECT_ARGUMENT_CLOSING_BRACE, createLocation(n, this.clonePosition())) : this.parseArgumentOptions(e, i, s, n);
- default:
- return this.error(ErrorKind.MALFORMED_ARGUMENT, createLocation(n, this.clonePosition()));
- }
- }, t.prototype.parseIdentifierIfPossible = function() {
- var e = this.clonePosition(), i = this.offset(), n = matchIdentifierAtIndex(this.message, i), s = i + n.length;
- this.bumpTo(s);
- var l = this.clonePosition(), h = createLocation(e, l);
- return { value: n, location: h };
- }, t.prototype.parseArgumentOptions = function(e, i, n, s) {
- var l, h = this.clonePosition(), _ = this.parseIdentifierIfPossible().value, c = this.clonePosition();
- switch (_) {
- case "":
- return this.error(ErrorKind.EXPECT_ARGUMENT_TYPE, createLocation(h, c));
- case "number":
- case "date":
- case "time": {
- this.bumpSpace();
- var o = null;
- if (this.bumpIf(",")) {
- this.bumpSpace();
- var r = this.clonePosition(), T = this.parseSimpleArgStyleIfPossible();
- if (T.err)
- return T;
- var S = trimEnd(T.val);
- if (S.length === 0)
- return this.error(ErrorKind.EXPECT_ARGUMENT_STYLE, createLocation(this.clonePosition(), this.clonePosition()));
- var w = createLocation(r, this.clonePosition());
- o = { style: S, styleLocation: w };
- }
- var C = this.tryParseArgumentClose(s);
- if (C.err)
- return C;
- var P = createLocation(s, this.clonePosition());
- if (o && startsWith(o == null ? void 0 : o.style, "::", 0)) {
- var b = trimStart(o.style.slice(2));
- if (_ === "number") {
- var T = this.parseNumberSkeletonFromString(b, o.styleLocation);
- return T.err ? T : {
- val: { type: TYPE.number, value: n, location: P, style: T.val },
- err: null
- };
- } else {
- if (b.length === 0)
- return this.error(ErrorKind.EXPECT_DATE_TIME_SKELETON, P);
- var k = b;
- this.locale && (k = getBestPattern(b, this.locale));
- var S = {
- type: SKELETON_TYPE.dateTime,
- pattern: k,
- location: o.styleLocation,
- parsedOptions: this.shouldParseSkeletons ? parseDateTimeSkeleton(k) : {}
- }, F = _ === "date" ? TYPE.date : TYPE.time;
- return {
- val: { type: F, value: n, location: P, style: S },
- err: null
- };
- }
- }
- return {
- val: {
- type: _ === "number" ? TYPE.number : _ === "date" ? TYPE.date : TYPE.time,
- value: n,
- location: P,
- style: (l = o == null ? void 0 : o.style) !== null && l !== void 0 ? l : null
- },
- err: null
- };
- }
- case "plural":
- case "selectordinal":
- case "select": {
- var x = this.clonePosition();
- if (this.bumpSpace(), !this.bumpIf(","))
- return this.error(ErrorKind.EXPECT_SELECT_ARGUMENT_OPTIONS, createLocation(x, __assign({}, x)));
- this.bumpSpace();
- var y = this.parseIdentifierIfPossible(), p = 0;
- if (_ !== "select" && y.value === "offset") {
- if (!this.bumpIf(":"))
- return this.error(ErrorKind.EXPECT_PLURAL_ARGUMENT_OFFSET_VALUE, createLocation(this.clonePosition(), this.clonePosition()));
- this.bumpSpace();
- var T = this.tryParseDecimalInteger(ErrorKind.EXPECT_PLURAL_ARGUMENT_OFFSET_VALUE, ErrorKind.INVALID_PLURAL_ARGUMENT_OFFSET_VALUE);
- if (T.err)
- return T;
- this.bumpSpace(), y = this.parseIdentifierIfPossible(), p = T.val;
- }
- var E = this.tryParsePluralOrSelectOptions(e, _, i, y);
- if (E.err)
- return E;
- var C = this.tryParseArgumentClose(s);
- if (C.err)
- return C;
- var $ = createLocation(s, this.clonePosition());
- return _ === "select" ? {
- val: {
- type: TYPE.select,
- value: n,
- options: fromEntries(E.val),
- location: $
- },
- err: null
- } : {
- val: {
- type: TYPE.plural,
- value: n,
- options: fromEntries(E.val),
- offset: p,
- pluralType: _ === "plural" ? "cardinal" : "ordinal",
- location: $
- },
- err: null
- };
- }
- default:
- return this.error(ErrorKind.INVALID_ARGUMENT_TYPE, createLocation(h, c));
- }
- }, t.prototype.tryParseArgumentClose = function(e) {
- return this.isEOF() || this.char() !== 125 ? this.error(ErrorKind.EXPECT_ARGUMENT_CLOSING_BRACE, createLocation(e, this.clonePosition())) : (this.bump(), { val: !0, err: null });
- }, t.prototype.parseSimpleArgStyleIfPossible = function() {
- for (var e = 0, i = this.clonePosition(); !this.isEOF(); ) {
- var n = this.char();
- switch (n) {
- case 39: {
- this.bump();
- var s = this.clonePosition();
- if (!this.bumpUntil("'"))
- return this.error(ErrorKind.UNCLOSED_QUOTE_IN_ARGUMENT_STYLE, createLocation(s, this.clonePosition()));
- this.bump();
- break;
- }
- case 123: {
- e += 1, this.bump();
- break;
- }
- case 125: {
- if (e > 0)
- e -= 1;
- else
- return {
- val: this.message.slice(i.offset, this.offset()),
- err: null
- };
- break;
- }
- default:
- this.bump();
- break;
- }
- }
- return {
- val: this.message.slice(i.offset, this.offset()),
- err: null
- };
- }, t.prototype.parseNumberSkeletonFromString = function(e, i) {
- var n = [];
- try {
- n = parseNumberSkeletonFromString(e);
- } catch {
- return this.error(ErrorKind.INVALID_NUMBER_SKELETON, i);
- }
- return {
- val: {
- type: SKELETON_TYPE.number,
- tokens: n,
- location: i,
- parsedOptions: this.shouldParseSkeletons ? parseNumberSkeleton(n) : {}
- },
- err: null
- };
- }, t.prototype.tryParsePluralOrSelectOptions = function(e, i, n, s) {
- for (var l, h = !1, _ = [], c = /* @__PURE__ */ new Set(), o = s.value, r = s.location; ; ) {
- if (o.length === 0) {
- var T = this.clonePosition();
- if (i !== "select" && this.bumpIf("=")) {
- var S = this.tryParseDecimalInteger(ErrorKind.EXPECT_PLURAL_ARGUMENT_SELECTOR, ErrorKind.INVALID_PLURAL_ARGUMENT_SELECTOR);
- if (S.err)
- return S;
- r = createLocation(T, this.clonePosition()), o = this.message.slice(T.offset, this.offset());
- } else
- break;
- }
- if (c.has(o))
- return this.error(i === "select" ? ErrorKind.DUPLICATE_SELECT_ARGUMENT_SELECTOR : ErrorKind.DUPLICATE_PLURAL_ARGUMENT_SELECTOR, r);
- o === "other" && (h = !0), this.bumpSpace();
- var w = this.clonePosition();
- if (!this.bumpIf("{"))
- return this.error(i === "select" ? ErrorKind.EXPECT_SELECT_ARGUMENT_SELECTOR_FRAGMENT : ErrorKind.EXPECT_PLURAL_ARGUMENT_SELECTOR_FRAGMENT, createLocation(this.clonePosition(), this.clonePosition()));
- var C = this.parseMessage(e + 1, i, n);
- if (C.err)
- return C;
- var P = this.tryParseArgumentClose(w);
- if (P.err)
- return P;
- _.push([
- o,
- {
- value: C.val,
- location: createLocation(w, this.clonePosition())
- }
- ]), c.add(o), this.bumpSpace(), l = this.parseIdentifierIfPossible(), o = l.value, r = l.location;
- }
- return _.length === 0 ? this.error(i === "select" ? ErrorKind.EXPECT_SELECT_ARGUMENT_SELECTOR : ErrorKind.EXPECT_PLURAL_ARGUMENT_SELECTOR, createLocation(this.clonePosition(), this.clonePosition())) : this.requiresOtherClause && !h ? this.error(ErrorKind.MISSING_OTHER_CLAUSE, createLocation(this.clonePosition(), this.clonePosition())) : { val: _, err: null };
- }, t.prototype.tryParseDecimalInteger = function(e, i) {
- var n = 1, s = this.clonePosition();
- this.bumpIf("+") || this.bumpIf("-") && (n = -1);
- for (var l = !1, h = 0; !this.isEOF(); ) {
- var _ = this.char();
- if (_ >= 48 && _ <= 57)
- l = !0, h = h * 10 + (_ - 48), this.bump();
- else
- break;
- }
- var c = createLocation(s, this.clonePosition());
- return l ? (h *= n, isSafeInteger(h) ? { val: h, err: null } : this.error(i, c)) : this.error(e, c);
- }, t.prototype.offset = function() {
- return this.position.offset;
- }, t.prototype.isEOF = function() {
- return this.offset() === this.message.length;
- }, t.prototype.clonePosition = function() {
- return {
- offset: this.position.offset,
- line: this.position.line,
- column: this.position.column
- };
- }, t.prototype.char = function() {
- var e = this.position.offset;
- if (e >= this.message.length)
- throw Error("out of bound");
- var i = codePointAt(this.message, e);
- if (i === void 0)
- throw Error("Offset ".concat(e, " is at invalid UTF-16 code unit boundary"));
- return i;
- }, t.prototype.error = function(e, i) {
- return {
- val: null,
- err: {
- kind: e,
- message: this.message,
- location: i
- }
- };
- }, t.prototype.bump = function() {
- if (!this.isEOF()) {
- var e = this.char();
- e === 10 ? (this.position.line += 1, this.position.column = 1, this.position.offset += 1) : (this.position.column += 1, this.position.offset += e < 65536 ? 1 : 2);
- }
- }, t.prototype.bumpIf = function(e) {
- if (startsWith(this.message, e, this.offset())) {
- for (var i = 0; i < e.length; i++)
- this.bump();
- return !0;
- }
- return !1;
- }, t.prototype.bumpUntil = function(e) {
- var i = this.offset(), n = this.message.indexOf(e, i);
- return n >= 0 ? (this.bumpTo(n), !0) : (this.bumpTo(this.message.length), !1);
- }, t.prototype.bumpTo = function(e) {
- if (this.offset() > e)
- throw Error("targetOffset ".concat(e, " must be greater than or equal to the current offset ").concat(this.offset()));
- for (e = Math.min(e, this.message.length); ; ) {
- var i = this.offset();
- if (i === e)
- break;
- if (i > e)
- throw Error("targetOffset ".concat(e, " is at invalid UTF-16 code unit boundary"));
- if (this.bump(), this.isEOF())
- break;
- }
- }, t.prototype.bumpSpace = function() {
- for (; !this.isEOF() && _isWhiteSpace(this.char()); )
- this.bump();
- }, t.prototype.peek = function() {
- if (this.isEOF())
- return null;
- var e = this.char(), i = this.offset(), n = this.message.charCodeAt(i + (e >= 65536 ? 2 : 1));
- return n ?? null;
- }, t;
- }()
-);
-function _isAlpha(t) {
- return t >= 97 && t <= 122 || t >= 65 && t <= 90;
-}
-function _isAlphaOrSlash(t) {
- return _isAlpha(t) || t === 47;
-}
-function _isPotentialElementNameChar(t) {
- return t === 45 || t === 46 || t >= 48 && t <= 57 || t === 95 || t >= 97 && t <= 122 || t >= 65 && t <= 90 || t == 183 || t >= 192 && t <= 214 || t >= 216 && t <= 246 || t >= 248 && t <= 893 || t >= 895 && t <= 8191 || t >= 8204 && t <= 8205 || t >= 8255 && t <= 8256 || t >= 8304 && t <= 8591 || t >= 11264 && t <= 12271 || t >= 12289 && t <= 55295 || t >= 63744 && t <= 64975 || t >= 65008 && t <= 65533 || t >= 65536 && t <= 983039;
-}
-function _isWhiteSpace(t) {
- return t >= 9 && t <= 13 || t === 32 || t === 133 || t >= 8206 && t <= 8207 || t === 8232 || t === 8233;
-}
-function _isPatternSyntax(t) {
-  return t >= 33 && t <= 35 || t === 36 || t >= 37 && t <= 39 || t === 40 || t === 41 || t === 42 || t === 43 || t === 44 || t === 45 || t >= 46 && t <= 47 || t >= 58 && t <= 59 || t >= 60 && t <= 62 || t >= 63 && t <= 64 || t === 91 || t === 92 || t === 93 || t === 94 || t === 96 || t === 123 || t === 124 || t === 125 || t === 126 || t === 161 || t >= 162 && t <= 165 || t === 166 || t === 167 || t === 169 || t === 171 || t === 172 || t === 174 || t === 176 || t === 177 || t === 182 || t === 187 || t === 191 || t === 215 || t === 247 || t >= 8208 && t <= 8213 || t >= 8214 && t <= 8215 || t === 8216 || t === 8217 || t === 8218 || t >= 8219 && t <= 8220 || t === 8221 || t === 8222 || t === 8223 || t >= 8224 && t <= 8231 || t >= 8240 && t <= 8248 || t === 8249 || t === 8250 || t >= 8251 && t <= 8254 || t >= 8257 && t <= 8259 || t === 8260 || t === 8261 || t === 8262 || t >= 8263 && t <= 8273 || t === 8274 || t === 8275 || t >= 8277 && t <= 8286 || t >= 8592 && t <= 8596 || t >= 8597 && t <= 8601 || t >= 8602 && t <= 8603 || t >= 8604 && t <= 8607 || t === 8608 || t >= 8609 && t <= 8610 || t === 8611 || t >= 8612 && t <= 8613 || t === 8614 || t >= 8615 && t <= 8621 || t === 8622 || t >= 8623 && t <= 8653 || t >= 8654 && t <= 8655 || t >= 8656 && t <= 8657 || t === 8658 || t === 8659 || t === 8660 || t >= 8661 && t <= 8691 || t >= 8692 && t <= 8959 || t >= 8960 && t <= 8967 || t === 8968 || t === 8969 || t === 8970 || t === 8971 || t >= 8972 && t <= 8991 || t >= 8992 && t <= 8993 || t >= 8994 && t <= 9e3 || t === 9001 || t === 9002 || t >= 9003 && t <= 9083 || t === 9084 || t >= 9085 && t <= 9114 || t >= 9115 && t <= 9139 || t >= 9140 && t <= 9179 || t >= 9180 && t <= 9185 || t >= 9186 && t <= 9254 || t >= 9255 && t <= 9279 || t >= 9280 && t <= 9290 || t >= 9291 && t <= 9311 || t >= 9472 && t <= 9654 || t === 9655 || t >= 9656 && t <= 9664 || t === 9665 || t >= 9666 && t <= 9719 || t >= 9720 && t <= 9727 || t >= 9728 && t <= 9838 || t === 9839 || t >= 9840 && t <= 10087 || t === 10088 || t === 10089 || t === 10090 || t === 10091 || t === 10092 || t === 10093 || t === 10094 || t === 10095 || t === 10096 || t === 10097 || t === 10098 || t === 10099 || t === 10100 || t === 10101 || t >= 10132 && t <= 10175 || t >= 10176 && t <= 10180 || t === 10181 || t === 10182 || t >= 10183 && t <= 10213 || t === 10214 || t === 10215 || t === 10216 || t === 10217 || t === 10218 || t === 10219 || t === 10220 || t === 10221 || t === 10222 || t === 10223 || t >= 10224 && t <= 10239 || t >= 10240 && t <= 10495 || t >= 10496 && t <= 10626 || t === 10627 || t === 10628 || t === 10629 || t === 10630 || t === 10631 || t === 10632 || t === 10633 || t === 10634 || t === 10635 || t === 10636 || t === 10637 || t === 10638 || t === 10639 || t === 10640 || t === 10641 || t === 10642 || t === 10643 || t === 10644 || t === 10645 || t === 10646 || t === 10647 || t === 10648 || t >= 10649 && t <= 10711 || t === 10712 || t === 10713 || t === 10714 || t === 10715 || t >= 10716 && t <= 10747 || t === 10748 || t === 10749 || t >= 10750 && t <= 11007 || t >= 11008 && t <= 11055 || t >= 11056 && t <= 11076 || t >= 11077 && t <= 11078 || t >= 11079 && t <= 11084 || t >= 11085 && t <= 11123 || t >= 11124 && t <= 11125 || t >= 11126 && t <= 11157 || t === 11158 || t >= 11159 && t <= 11263 || t >= 11776 && t <= 11777 || t === 11778 || t === 11779 || t === 11780 || t === 11781 || t >= 11782 && t <= 11784 || t === 11785 || t === 11786 || t === 11787 || t === 11788 || t === 11789 || t >= 11790 && t <= 11798 || t === 11799 || t >= 11800 && t <= 11801 || t === 11802 || t === 11803 || t === 11804 || t === 11805 || t >= 11806 && t <= 11807 || t === 11808 || t === 11809 || t === 11810 || t === 11811 || t === 11812 || t === 11813 || t === 11814 || t === 11815 || t === 11816 || t === 11817 || t >= 11818 && t <= 11822 || t === 11823 || t >= 11824 && t <= 11833 || t >= 11834 && t <= 11835 || t >= 11836 && t <= 11839 || t === 11840 || t === 11841 || t === 11842 || t >= 11843 && t <= 11855 || t >= 11856 && t <= 11857 || t === 11858 || t >= 11859 && t <= 11903 || t >= 12289 && t <= 12291 || t === 12296 || t === 12297 || t === 12298 || t === 12299 || t === 12300 || t === 12301 || t === 12302 || t === 12303 || t === 12304 || t === 12305 || t >= 12306 && t <= 12307 || t === 12308 || t === 12309 || t === 12310 || t === 12311 || t === 12312 || t === 12313 || t === 12314 || t === 12315 || t === 12316 || t === 12317 || t >= 12318 && t <= 12319 || t === 12320 || t === 12336 || t === 64830 || t === 64831 || t >= 65093 && t <= 65094;
-}
-function pruneLocation(t) {
- t.forEach(function(e) {
- if (delete e.location, isSelectElement(e) || isPluralElement(e))
- for (var i in e.options)
- delete e.options[i].location, pruneLocation(e.options[i].value);
- else
- isNumberElement(e) && isNumberSkeleton(e.style) || (isDateElement(e) || isTimeElement(e)) && isDateTimeSkeleton(e.style) ? delete e.style.location : isTagElement(e) && pruneLocation(e.children);
- });
-}
-function parse(t, e) {
- e === void 0 && (e = {}), e = __assign({ shouldParseSkeletons: !0, requiresOtherClause: !0 }, e);
- var i = new Parser(t, e).parse();
- if (i.err) {
- var n = SyntaxError(ErrorKind[i.err.kind]);
- throw n.location = i.err.location, n.originalMessage = i.err.message, n;
- }
- return e != null && e.captureLocation || pruneLocation(i.val), i.val;
-}
-function memoize(t, e) {
- var i = e && e.cache ? e.cache : cacheDefault, n = e && e.serializer ? e.serializer : serializerDefault, s = e && e.strategy ? e.strategy : strategyDefault;
- return s(t, {
- cache: i,
- serializer: n
- });
-}
-function isPrimitive(t) {
- return t == null || typeof t == "number" || typeof t == "boolean";
-}
-function monadic(t, e, i, n) {
- var s = isPrimitive(n) ? n : i(n), l = e.get(s);
- return typeof l > "u" && (l = t.call(this, n), e.set(s, l)), l;
-}
-function variadic(t, e, i) {
- var n = Array.prototype.slice.call(arguments, 3), s = i(n), l = e.get(s);
- return typeof l > "u" && (l = t.apply(this, n), e.set(s, l)), l;
-}
-function assemble(t, e, i, n, s) {
- return i.bind(e, t, n, s);
-}
-function strategyDefault(t, e) {
- var i = t.length === 1 ? monadic : variadic;
- return assemble(t, this, i, e.cache.create(), e.serializer);
-}
-function strategyVariadic(t, e) {
- return assemble(t, this, variadic, e.cache.create(), e.serializer);
-}
-function strategyMonadic(t, e) {
- return assemble(t, this, monadic, e.cache.create(), e.serializer);
-}
-var serializerDefault = function() {
- return JSON.stringify(arguments);
-};
-function ObjectWithoutPrototypeCache() {
- this.cache = /* @__PURE__ */ Object.create(null);
-}
-ObjectWithoutPrototypeCache.prototype.get = function(t) {
- return this.cache[t];
-};
-ObjectWithoutPrototypeCache.prototype.set = function(t, e) {
- this.cache[t] = e;
-};
-var cacheDefault = {
- create: function() {
- return new ObjectWithoutPrototypeCache();
- }
-}, strategies = {
- variadic: strategyVariadic,
- monadic: strategyMonadic
-}, ErrorCode;
-(function(t) {
- t.MISSING_VALUE = "MISSING_VALUE", t.INVALID_VALUE = "INVALID_VALUE", t.MISSING_INTL_API = "MISSING_INTL_API";
-})(ErrorCode || (ErrorCode = {}));
-var FormatError = (
- /** @class */
- function(t) {
- __extends(e, t);
- function e(i, n, s) {
- var l = t.call(this, i) || this;
- return l.code = n, l.originalMessage = s, l;
- }
- return e.prototype.toString = function() {
- return "[formatjs Error: ".concat(this.code, "] ").concat(this.message);
- }, e;
- }(Error)
-), InvalidValueError = (
- /** @class */
- function(t) {
- __extends(e, t);
- function e(i, n, s, l) {
- return t.call(this, 'Invalid values for "'.concat(i, '": "').concat(n, '". Options are "').concat(Object.keys(s).join('", "'), '"'), ErrorCode.INVALID_VALUE, l) || this;
- }
- return e;
- }(FormatError)
-), InvalidValueTypeError = (
- /** @class */
- function(t) {
- __extends(e, t);
- function e(i, n, s) {
- return t.call(this, 'Value for "'.concat(i, '" must be of type ').concat(n), ErrorCode.INVALID_VALUE, s) || this;
- }
- return e;
- }(FormatError)
-), MissingValueError = (
- /** @class */
- function(t) {
- __extends(e, t);
- function e(i, n) {
- return t.call(this, 'The intl string context variable "'.concat(i, '" was not provided to the string "').concat(n, '"'), ErrorCode.MISSING_VALUE, n) || this;
- }
- return e;
- }(FormatError)
-), PART_TYPE;
-(function(t) {
- t[t.literal = 0] = "literal", t[t.object = 1] = "object";
-})(PART_TYPE || (PART_TYPE = {}));
-function mergeLiteral(t) {
- return t.length < 2 ? t : t.reduce(function(e, i) {
- var n = e[e.length - 1];
- return !n || n.type !== PART_TYPE.literal || i.type !== PART_TYPE.literal ? e.push(i) : n.value += i.value, e;
- }, []);
-}
-function isFormatXMLElementFn(t) {
- return typeof t == "function";
-}
-function formatToParts(t, e, i, n, s, l, h) {
- if (t.length === 1 && isLiteralElement(t[0]))
- return [
- {
- type: PART_TYPE.literal,
- value: t[0].value
- }
- ];
- for (var _ = [], c = 0, o = t; c < o.length; c++) {
- var r = o[c];
- if (isLiteralElement(r)) {
- _.push({
- type: PART_TYPE.literal,
- value: r.value
- });
- continue;
- }
- if (isPoundElement(r)) {
- typeof l == "number" && _.push({
- type: PART_TYPE.literal,
- value: i.getNumberFormat(e).format(l)
- });
- continue;
- }
- var T = r.value;
- if (!(s && T in s))
- throw new MissingValueError(T, h);
- var S = s[T];
- if (isArgumentElement(r)) {
- (!S || typeof S == "string" || typeof S == "number") && (S = typeof S == "string" || typeof S == "number" ? String(S) : ""), _.push({
- type: typeof S == "string" ? PART_TYPE.literal : PART_TYPE.object,
- value: S
- });
- continue;
- }
- if (isDateElement(r)) {
- var w = typeof r.style == "string" ? n.date[r.style] : isDateTimeSkeleton(r.style) ? r.style.parsedOptions : void 0;
- _.push({
- type: PART_TYPE.literal,
- value: i.getDateTimeFormat(e, w).format(S)
- });
- continue;
- }
- if (isTimeElement(r)) {
- var w = typeof r.style == "string" ? n.time[r.style] : isDateTimeSkeleton(r.style) ? r.style.parsedOptions : n.time.medium;
- _.push({
- type: PART_TYPE.literal,
- value: i.getDateTimeFormat(e, w).format(S)
- });
- continue;
- }
- if (isNumberElement(r)) {
- var w = typeof r.style == "string" ? n.number[r.style] : isNumberSkeleton(r.style) ? r.style.parsedOptions : void 0;
- w && w.scale && (S = S * (w.scale || 1)), _.push({
- type: PART_TYPE.literal,
- value: i.getNumberFormat(e, w).format(S)
- });
- continue;
- }
- if (isTagElement(r)) {
- var C = r.children, P = r.value, b = s[P];
- if (!isFormatXMLElementFn(b))
- throw new InvalidValueTypeError(P, "function", h);
- var k = formatToParts(C, e, i, n, s, l), F = b(k.map(function(p) {
- return p.value;
- }));
- Array.isArray(F) || (F = [F]), _.push.apply(_, F.map(function(p) {
- return {
- type: typeof p == "string" ? PART_TYPE.literal : PART_TYPE.object,
- value: p
- };
- }));
- }
- if (isSelectElement(r)) {
- var x = r.options[S] || r.options.other;
- if (!x)
- throw new InvalidValueError(r.value, S, Object.keys(r.options), h);
- _.push.apply(_, formatToParts(x.value, e, i, n, s));
- continue;
- }
- if (isPluralElement(r)) {
- var x = r.options["=".concat(S)];
- if (!x) {
- if (!Intl.PluralRules)
- throw new FormatError(`Intl.PluralRules is not available in this environment.
-Try polyfilling it using "@formatjs/intl-pluralrules"
-`, ErrorCode.MISSING_INTL_API, h);
- var y = i.getPluralRules(e, { type: r.pluralType }).select(S - (r.offset || 0));
- x = r.options[y] || r.options.other;
- }
- if (!x)
- throw new InvalidValueError(r.value, S, Object.keys(r.options), h);
- _.push.apply(_, formatToParts(x.value, e, i, n, s, S - (r.offset || 0)));
- continue;
- }
- }
- return mergeLiteral(_);
-}
-function mergeConfig(t, e) {
- return e ? __assign(__assign(__assign({}, t || {}), e || {}), Object.keys(t).reduce(function(i, n) {
- return i[n] = __assign(__assign({}, t[n]), e[n] || {}), i;
- }, {})) : t;
-}
-function mergeConfigs(t, e) {
- return e ? Object.keys(t).reduce(function(i, n) {
- return i[n] = mergeConfig(t[n], e[n]), i;
- }, __assign({}, t)) : t;
-}
-function createFastMemoizeCache(t) {
- return {
- create: function() {
- return {
- get: function(e) {
- return t[e];
- },
- set: function(e, i) {
- t[e] = i;
- }
- };
- }
- };
-}
-function createDefaultFormatters(t) {
- return t === void 0 && (t = {
- number: {},
- dateTime: {},
- pluralRules: {}
- }), {
- getNumberFormat: memoize(function() {
- for (var e, i = [], n = 0; n < arguments.length; n++)
- i[n] = arguments[n];
- return new ((e = Intl.NumberFormat).bind.apply(e, __spreadArray([void 0], i, !1)))();
- }, {
- cache: createFastMemoizeCache(t.number),
- strategy: strategies.variadic
- }),
- getDateTimeFormat: memoize(function() {
- for (var e, i = [], n = 0; n < arguments.length; n++)
- i[n] = arguments[n];
- return new ((e = Intl.DateTimeFormat).bind.apply(e, __spreadArray([void 0], i, !1)))();
- }, {
- cache: createFastMemoizeCache(t.dateTime),
- strategy: strategies.variadic
- }),
- getPluralRules: memoize(function() {
- for (var e, i = [], n = 0; n < arguments.length; n++)
- i[n] = arguments[n];
- return new ((e = Intl.PluralRules).bind.apply(e, __spreadArray([void 0], i, !1)))();
- }, {
- cache: createFastMemoizeCache(t.pluralRules),
- strategy: strategies.variadic
- })
- };
-}
-var IntlMessageFormat = (
- /** @class */
- function() {
- function t(e, i, n, s) {
- var l = this;
- if (i === void 0 && (i = t.defaultLocale), this.formatterCache = {
- number: {},
- dateTime: {},
- pluralRules: {}
- }, this.format = function(h) {
- var _ = l.formatToParts(h);
- if (_.length === 1)
- return _[0].value;
- var c = _.reduce(function(o, r) {
- return !o.length || r.type !== PART_TYPE.literal || typeof o[o.length - 1] != "string" ? o.push(r.value) : o[o.length - 1] += r.value, o;
- }, []);
- return c.length <= 1 ? c[0] || "" : c;
- }, this.formatToParts = function(h) {
- return formatToParts(l.ast, l.locales, l.formatters, l.formats, h, void 0, l.message);
- }, this.resolvedOptions = function() {
- return {
- locale: l.resolvedLocale.toString()
- };
- }, this.getAst = function() {
- return l.ast;
- }, this.locales = i, this.resolvedLocale = t.resolveLocale(i), typeof e == "string") {
- if (this.message = e, !t.__parse)
- throw new TypeError("IntlMessageFormat.__parse must be set to process `message` of type `string`");
- this.ast = t.__parse(e, {
- ignoreTag: s == null ? void 0 : s.ignoreTag,
- locale: this.resolvedLocale
- });
- } else
- this.ast = e;
- if (!Array.isArray(this.ast))
- throw new TypeError("A message must be provided as a String or AST.");
- this.formats = mergeConfigs(t.formats, n), this.formatters = s && s.formatters || createDefaultFormatters(this.formatterCache);
- }
- return Object.defineProperty(t, "defaultLocale", {
- get: function() {
- return t.memoizedDefaultLocale || (t.memoizedDefaultLocale = new Intl.NumberFormat().resolvedOptions().locale), t.memoizedDefaultLocale;
- },
- enumerable: !1,
- configurable: !0
- }), t.memoizedDefaultLocale = null, t.resolveLocale = function(e) {
- var i = Intl.NumberFormat.supportedLocalesOf(e);
- return i.length > 0 ? new Intl.Locale(i[0]) : new Intl.Locale(typeof e == "string" ? e : e[0]);
- }, t.__parse = parse, t.formats = {
- number: {
- integer: {
- maximumFractionDigits: 0
- },
- currency: {
- style: "currency"
- },
- percent: {
- style: "percent"
- }
- },
- date: {
- short: {
- month: "numeric",
- day: "numeric",
- year: "2-digit"
- },
- medium: {
- month: "short",
- day: "numeric",
- year: "numeric"
- },
- long: {
- month: "long",
- day: "numeric",
- year: "numeric"
- },
- full: {
- weekday: "long",
- month: "long",
- day: "numeric",
- year: "numeric"
- }
- },
- time: {
- short: {
- hour: "numeric",
- minute: "numeric"
- },
- medium: {
- hour: "numeric",
- minute: "numeric",
- second: "numeric"
- },
- long: {
- hour: "numeric",
- minute: "numeric",
- second: "numeric",
- timeZoneName: "short"
- },
- full: {
- hour: "numeric",
- minute: "numeric",
- second: "numeric",
- timeZoneName: "short"
- }
- }
- }, t;
- }()
-);
-function delve(t, e) {
- if (e == null)
- return;
- if (e in t)
- return t[e];
- const i = e.split(".");
- let n = t;
- for (let s = 0; s < i.length; s++)
- if (typeof n == "object") {
- if (s > 0) {
- const l = i.slice(s, i.length).join(".");
- if (l in n) {
- n = n[l];
- break;
- }
- }
- n = n[i[s]];
- } else
- n = void 0;
- return n;
-}
-const lookupCache = {}, addToCache = (t, e, i) => i && (e in lookupCache || (lookupCache[e] = {}), t in lookupCache[e] || (lookupCache[e][t] = i), i), lookup = (t, e) => {
- if (e == null)
- return;
- if (e in lookupCache && t in lookupCache[e])
- return lookupCache[e][t];
- const i = getPossibleLocales(e);
- for (let n = 0; n < i.length; n++) {
- const s = i[n], l = getMessageFromDictionary(s, t);
- if (l)
- return addToCache(t, e, l);
- }
-};
-let dictionary;
-const $dictionary = writable({});
-function getLocaleDictionary(t) {
- return dictionary[t] || null;
-}
-function hasLocaleDictionary(t) {
- return t in dictionary;
-}
-function getMessageFromDictionary(t, e) {
- if (!hasLocaleDictionary(t))
- return null;
- const i = getLocaleDictionary(t);
- return delve(i, e);
-}
-function getClosestAvailableLocale(t) {
- if (t == null)
- return;
- const e = getPossibleLocales(t);
- for (let i = 0; i < e.length; i++) {
- const n = e[i];
- if (hasLocaleDictionary(n))
- return n;
- }
-}
-function addMessages(t, ...e) {
- delete lookupCache[t], $dictionary.update((i) => (i[t] = deepmerge$1.all([i[t] || {}, ...e]), i));
-}
-derived(
- [$dictionary],
- ([t]) => Object.keys(t)
-);
-$dictionary.subscribe((t) => dictionary = t);
-const queue = {};
-function removeLoaderFromQueue(t, e) {
- queue[t].delete(e), queue[t].size === 0 && delete queue[t];
-}
-function getLocaleQueue(t) {
- return queue[t];
-}
-function getLocalesQueues(t) {
- return getPossibleLocales(t).map((e) => {
- const i = getLocaleQueue(e);
- return [e, i ? [...i] : []];
- }).filter(([, e]) => e.length > 0);
-}
-function hasLocaleQueue(t) {
- return t == null ? !1 : getPossibleLocales(t).some(
- (e) => {
- var i;
- return (i = getLocaleQueue(e)) == null ? void 0 : i.size;
- }
- );
-}
-function loadLocaleQueue(t, e) {
- return Promise.all(
- e.map((n) => (removeLoaderFromQueue(t, n), n().then((s) => s.default || s)))
- ).then((n) => addMessages(t, ...n));
-}
-const activeFlushes = {};
-function flush(t) {
- if (!hasLocaleQueue(t))
- return t in activeFlushes ? activeFlushes[t] : Promise.resolve();
- const e = getLocalesQueues(t);
- return activeFlushes[t] = Promise.all(
- e.map(
- ([i, n]) => loadLocaleQueue(i, n)
- )
- ).then(() => {
- if (hasLocaleQueue(t))
- return flush(t);
- delete activeFlushes[t];
- }), activeFlushes[t];
-}
-const defaultFormats = {
- number: {
- scientific: { notation: "scientific" },
- engineering: { notation: "engineering" },
- compactLong: { notation: "compact", compactDisplay: "long" },
- compactShort: { notation: "compact", compactDisplay: "short" }
- },
- date: {
- short: { month: "numeric", day: "numeric", year: "2-digit" },
- medium: { month: "short", day: "numeric", year: "numeric" },
- long: { month: "long", day: "numeric", year: "numeric" },
- full: { weekday: "long", month: "long", day: "numeric", year: "numeric" }
- },
- time: {
- short: { hour: "numeric", minute: "numeric" },
- medium: { hour: "numeric", minute: "numeric", second: "numeric" },
- long: {
- hour: "numeric",
- minute: "numeric",
- second: "numeric",
- timeZoneName: "short"
- },
- full: {
- hour: "numeric",
- minute: "numeric",
- second: "numeric",
- timeZoneName: "short"
- }
- }
-}, defaultOptions = {
- fallbackLocale: null,
- loadingDelay: 200,
- formats: defaultFormats,
- warnOnMissingMessages: !0,
- handleMissingMessage: void 0,
- ignoreTag: !0
-}, options = defaultOptions;
-function getOptions() {
- return options;
-}
-const $isLoading = writable(!1);
-var __defProp$1 = Object.defineProperty, __defProps = Object.defineProperties, __getOwnPropDescs = Object.getOwnPropertyDescriptors, __getOwnPropSymbols$1 = Object.getOwnPropertySymbols, __hasOwnProp$1 = Object.prototype.hasOwnProperty, __propIsEnum$1 = Object.prototype.propertyIsEnumerable, __defNormalProp$1 = (t, e, i) => e in t ? __defProp$1(t, e, { enumerable: !0, configurable: !0, writable: !0, value: i }) : t[e] = i, __spreadValues$1 = (t, e) => {
- for (var i in e || (e = {}))
- __hasOwnProp$1.call(e, i) && __defNormalProp$1(t, i, e[i]);
- if (__getOwnPropSymbols$1)
- for (var i of __getOwnPropSymbols$1(e))
- __propIsEnum$1.call(e, i) && __defNormalProp$1(t, i, e[i]);
- return t;
-}, __spreadProps = (t, e) => __defProps(t, __getOwnPropDescs(e));
-let current;
-const internalLocale = writable(null);
-function getSubLocales(t) {
- return t.split("-").map((e, i, n) => n.slice(0, i + 1).join("-")).reverse();
-}
-function getPossibleLocales(t, e = getOptions().fallbackLocale) {
- const i = getSubLocales(t);
- return e ? [.../* @__PURE__ */ new Set([...i, ...getSubLocales(e)])] : i;
-}
-function getCurrentLocale() {
- return current ?? void 0;
-}
-internalLocale.subscribe((t) => {
- current = t ?? void 0, typeof window < "u" && t != null && document.documentElement.setAttribute("lang", t);
-});
-const set = (t) => {
- if (t && getClosestAvailableLocale(t) && hasLocaleQueue(t)) {
- const { loadingDelay: e } = getOptions();
- let i;
- return typeof window < "u" && getCurrentLocale() != null && e ? i = window.setTimeout(
- () => $isLoading.set(!0),
- e
- ) : $isLoading.set(!0), flush(t).then(() => {
- internalLocale.set(t);
- }).finally(() => {
- clearTimeout(i), $isLoading.set(!1);
- });
- }
- return internalLocale.set(t);
-}, $locale = __spreadProps(__spreadValues$1({}, internalLocale), {
- set
-}), monadicMemoize = (t) => {
- const e = /* @__PURE__ */ Object.create(null);
- return (n) => {
- const s = JSON.stringify(n);
- return s in e ? e[s] : e[s] = t(n);
- };
-};
-var __defProp = Object.defineProperty, __getOwnPropSymbols = Object.getOwnPropertySymbols, __hasOwnProp = Object.prototype.hasOwnProperty, __propIsEnum = Object.prototype.propertyIsEnumerable, __defNormalProp = (t, e, i) => e in t ? __defProp(t, e, { enumerable: !0, configurable: !0, writable: !0, value: i }) : t[e] = i, __spreadValues = (t, e) => {
- for (var i in e || (e = {}))
- __hasOwnProp.call(e, i) && __defNormalProp(t, i, e[i]);
- if (__getOwnPropSymbols)
- for (var i of __getOwnPropSymbols(e))
- __propIsEnum.call(e, i) && __defNormalProp(t, i, e[i]);
- return t;
-}, __objRest = (t, e) => {
- var i = {};
- for (var n in t)
- __hasOwnProp.call(t, n) && e.indexOf(n) < 0 && (i[n] = t[n]);
- if (t != null && __getOwnPropSymbols)
- for (var n of __getOwnPropSymbols(t))
- e.indexOf(n) < 0 && __propIsEnum.call(t, n) && (i[n] = t[n]);
- return i;
-};
-const getIntlFormatterOptions = (t, e) => {
- const { formats: i } = getOptions();
- if (t in i && e in i[t])
- return i[t][e];
- throw new Error(`[svelte-i18n] Unknown "${e}" ${t} format.`);
-}, createNumberFormatter = monadicMemoize(
- (t) => {
- var e = t, { locale: i, format: n } = e, s = __objRest(e, ["locale", "format"]);
- if (i == null)
- throw new Error('[svelte-i18n] A "locale" must be set to format numbers');
- return n && (s = getIntlFormatterOptions("number", n)), new Intl.NumberFormat(i, s);
- }
-), createDateFormatter = monadicMemoize(
- (t) => {
- var e = t, { locale: i, format: n } = e, s = __objRest(e, ["locale", "format"]);
- if (i == null)
- throw new Error('[svelte-i18n] A "locale" must be set to format dates');
- return n ? s = getIntlFormatterOptions("date", n) : Object.keys(s).length === 0 && (s = getIntlFormatterOptions("date", "short")), new Intl.DateTimeFormat(i, s);
- }
-), createTimeFormatter = monadicMemoize(
- (t) => {
- var e = t, { locale: i, format: n } = e, s = __objRest(e, ["locale", "format"]);
- if (i == null)
- throw new Error(
- '[svelte-i18n] A "locale" must be set to format time values'
- );
- return n ? s = getIntlFormatterOptions("time", n) : Object.keys(s).length === 0 && (s = getIntlFormatterOptions("time", "short")), new Intl.DateTimeFormat(i, s);
- }
-), getNumberFormatter = (t = {}) => {
- var e = t, {
- locale: i = getCurrentLocale()
- } = e, n = __objRest(e, [
- "locale"
- ]);
- return createNumberFormatter(__spreadValues({ locale: i }, n));
-}, getDateFormatter = (t = {}) => {
- var e = t, {
- locale: i = getCurrentLocale()
- } = e, n = __objRest(e, [
- "locale"
- ]);
- return createDateFormatter(__spreadValues({ locale: i }, n));
-}, getTimeFormatter = (t = {}) => {
- var e = t, {
- locale: i = getCurrentLocale()
- } = e, n = __objRest(e, [
- "locale"
- ]);
- return createTimeFormatter(__spreadValues({ locale: i }, n));
-}, getMessageFormatter = monadicMemoize(
- // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
- (t, e = getCurrentLocale()) => new IntlMessageFormat(t, e, getOptions().formats, {
- ignoreTag: getOptions().ignoreTag
- })
-), formatMessage = (t, e = {}) => {
- var i, n, s, l;
- let h = e;
- typeof t == "object" && (h = t, t = h.id);
- const {
- values: _,
- locale: c = getCurrentLocale(),
- default: o
- } = h;
- if (c == null)
- throw new Error(
- "[svelte-i18n] Cannot format a message without first setting the initial locale."
- );
- let r = lookup(t, c);
- if (!r)
- r = (l = (s = (n = (i = getOptions()).handleMissingMessage) == null ? void 0 : n.call(i, { locale: c, id: t, defaultValue: o })) != null ? s : o) != null ? l : t;
- else if (typeof r != "string")
- return console.warn(
-      `[svelte-i18n] Message with id "${t}" must be of type "string", found: "${typeof r}". Getting its value through the "$format" method is deprecated; use the "json" method instead.`
- ), r;
- if (!_)
- return r;
- let T = r;
- try {
- T = getMessageFormatter(r, c).format(_);
- } catch (S) {
- S instanceof Error && console.warn(
- `[svelte-i18n] Message "${t}" has syntax error:`,
- S.message
- );
- }
- return T;
-}, formatTime = (t, e) => getTimeFormatter(e).format(t), formatDate = (t, e) => getDateFormatter(e).format(t), formatNumber = (t, e) => getNumberFormatter(e).format(t), getJSON = (t, e = getCurrentLocale()) => lookup(t, e);
-derived([$locale, $dictionary], () => formatMessage);
-derived([$locale], () => formatTime);
-derived([$locale], () => formatDate);
-derived([$locale], () => formatNumber);
-derived([$locale, $dictionary], () => getJSON);
-const Upload_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$2,
- append: append$2,
- attr: attr$2,
- binding_callbacks: binding_callbacks$1,
- bubble,
- create_slot,
- detach: detach$2,
- element: element$2,
- get_all_dirty_from_scope,
- get_slot_changes,
- init: init$2,
- insert: insert$2,
- listen,
- prevent_default,
- run_all,
- safe_not_equal: safe_not_equal$2,
- space: space$2,
- stop_propagation,
- toggle_class: toggle_class$1,
- transition_in: transition_in$2,
- transition_out: transition_out$2,
- update_slot_base
-} = window.__gradio__svelte__internal, { createEventDispatcher: createEventDispatcher$1, tick: tick$1, getContext } = window.__gradio__svelte__internal;
-function create_fragment$2(t) {
- let e, i, n, s, l, h, _, c, o;
- const r = (
- /*#slots*/
- t[16].default
- ), T = create_slot(
- r,
- t,
- /*$$scope*/
- t[15],
- null
- );
- return {
- c() {
- e = element$2("button"), T && T.c(), i = space$2(), n = element$2("input"), attr$2(n, "type", "file"), attr$2(
- n,
- "accept",
- /*filetype*/
- t[0]
- ), n.multiple = s = /*file_count*/
- t[4] === "multiple" || void 0, attr$2(n, "webkitdirectory", l = /*file_count*/
- t[4] === "directory" || void 0), attr$2(n, "mozdirectory", h = /*file_count*/
- t[4] === "directory" || void 0), attr$2(n, "class", "svelte-18dlsnh"), attr$2(e, "class", "svelte-18dlsnh"), toggle_class$1(
- e,
- "hidden",
- /*hidden*/
- t[5]
- ), toggle_class$1(
- e,
- "center",
- /*center*/
- t[2]
- ), toggle_class$1(
- e,
- "boundedheight",
- /*boundedheight*/
- t[1]
- ), toggle_class$1(
- e,
- "flex",
- /*flex*/
- t[3]
- );
- },
- m(S, w) {
- insert$2(S, e, w), T && T.m(e, null), append$2(e, i), append$2(e, n), t[24](n), _ = !0, c || (o = [
- listen(
- n,
- "change",
- /*load_files_from_upload*/
- t[9]
- ),
- listen(e, "drag", stop_propagation(prevent_default(
- /*drag_handler*/
- t[17]
- ))),
- listen(e, "dragstart", stop_propagation(prevent_default(
- /*dragstart_handler*/
- t[18]
- ))),
- listen(e, "dragend", stop_propagation(prevent_default(
- /*dragend_handler*/
- t[19]
- ))),
- listen(e, "dragover", stop_propagation(prevent_default(
- /*dragover_handler*/
- t[20]
- ))),
- listen(e, "dragenter", stop_propagation(prevent_default(
- /*dragenter_handler*/
- t[21]
- ))),
- listen(e, "dragleave", stop_propagation(prevent_default(
- /*dragleave_handler*/
- t[22]
- ))),
- listen(e, "drop", stop_propagation(prevent_default(
- /*drop_handler*/
- t[23]
- ))),
- listen(
- e,
- "click",
- /*open_file_upload*/
- t[6]
- ),
- listen(
- e,
- "drop",
- /*loadFilesFromDrop*/
- t[10]
- ),
- listen(
- e,
- "dragenter",
- /*updateDragging*/
- t[8]
- ),
- listen(
- e,
- "dragleave",
- /*updateDragging*/
- t[8]
- )
- ], c = !0);
- },
- p(S, [w]) {
- T && T.p && (!_ || w & /*$$scope*/
- 32768) && update_slot_base(
- T,
- r,
- S,
- /*$$scope*/
- S[15],
- _ ? get_slot_changes(
- r,
- /*$$scope*/
- S[15],
- w,
- null
- ) : get_all_dirty_from_scope(
- /*$$scope*/
- S[15]
- ),
- null
- ), (!_ || w & /*filetype*/
- 1) && attr$2(
- n,
- "accept",
- /*filetype*/
- S[0]
- ), (!_ || w & /*file_count*/
- 16 && s !== (s = /*file_count*/
- S[4] === "multiple" || void 0)) && (n.multiple = s), (!_ || w & /*file_count*/
- 16 && l !== (l = /*file_count*/
- S[4] === "directory" || void 0)) && attr$2(n, "webkitdirectory", l), (!_ || w & /*file_count*/
- 16 && h !== (h = /*file_count*/
- S[4] === "directory" || void 0)) && attr$2(n, "mozdirectory", h), (!_ || w & /*hidden*/
- 32) && toggle_class$1(
- e,
- "hidden",
- /*hidden*/
- S[5]
- ), (!_ || w & /*center*/
- 4) && toggle_class$1(
- e,
- "center",
- /*center*/
- S[2]
- ), (!_ || w & /*boundedheight*/
- 2) && toggle_class$1(
- e,
- "boundedheight",
- /*boundedheight*/
- S[1]
- ), (!_ || w & /*flex*/
- 8) && toggle_class$1(
- e,
- "flex",
- /*flex*/
- S[3]
- );
- },
- i(S) {
- _ || (transition_in$2(T, S), _ = !0);
- },
- o(S) {
- transition_out$2(T, S), _ = !1;
- },
- d(S) {
- S && detach$2(e), T && T.d(S), t[24](null), c = !1, run_all(o);
- }
- };
-}
-function _optionalChain(t) {
- let e, i = t[0], n = 1;
- for (; n < t.length; ) {
- const s = t[n], l = t[n + 1];
- if (n += 2, (s === "optionalAccess" || s === "optionalCall") && i == null)
- return;
- s === "access" || s === "optionalAccess" ? (e = i, i = l(i)) : (s === "call" || s === "optionalCall") && (i = l((...h) => i.call(e, ...h)), e = void 0);
- }
- return i;
-}
-function is_valid_mimetype(t, e) {
- return !t || t === "*" ? !0 : t.endsWith("/*") ? e.startsWith(t.slice(0, -1)) : t === e;
-}
-function instance$2(t, e, i) {
- let { $$slots: n = {}, $$scope: s } = e, { filetype: l = null } = e, { dragging: h = !1 } = e, { boundedheight: _ = !0 } = e, { center: c = !0 } = e, { flex: o = !0 } = e, { file_count: r = "single" } = e, { disable_click: T = !1 } = e, { root: S } = e, { hidden: w = !1 } = e;
- const C = getContext("upload_files");
- let P;
- const b = createEventDispatcher$1();
- function k() {
- i(11, h = !h);
- }
- function F() {
- T || (i(7, P.value = "", P), P.click());
- }
- async function x(B) {
- await tick$1();
- const ee = await upload(B, S, C);
- return b("load", r === "single" ? _optionalChain([ee, "optionalAccess", (Y) => Y[0]]) : ee), ee || [];
- }
- async function y(B) {
- if (!B.length)
- return;
- let ee = B.map((q) => new File([q], q.name)), Y = await prepare_files(ee);
- return await x(Y);
- }
- async function p(B) {
- const ee = B.target;
- ee.files && await y(Array.from(ee.files));
- }
- async function E(B) {
- if (i(11, h = !1), !_optionalChain([B, "access", (Y) => Y.dataTransfer, "optionalAccess", (Y) => Y.files]))
- return;
- const ee = Array.from(B.dataTransfer.files).filter((Y) => _optionalChain([
- l,
- "optionalAccess",
- (q) => q.split,
- "call",
- (q) => q(","),
- "access",
- (q) => q.some,
- "call",
- (q) => q((le) => is_valid_mimetype(le, Y.type))
-    ]) ? !0 : (b("error", `Invalid file type, only ${l} allowed.`), !1));
- await y(ee);
- }
- function $(B) {
- bubble.call(this, t, B);
- }
- function M(B) {
- bubble.call(this, t, B);
- }
- function m(B) {
- bubble.call(this, t, B);
- }
- function N(B) {
- bubble.call(this, t, B);
- }
- function D(B) {
- bubble.call(this, t, B);
- }
- function X(B) {
- bubble.call(this, t, B);
- }
- function G(B) {
- bubble.call(this, t, B);
- }
- function I(B) {
- binding_callbacks$1[B ? "unshift" : "push"](() => {
- P = B, i(7, P);
- });
- }
- return t.$$set = (B) => {
- "filetype" in B && i(0, l = B.filetype), "dragging" in B && i(11, h = B.dragging), "boundedheight" in B && i(1, _ = B.boundedheight), "center" in B && i(2, c = B.center), "flex" in B && i(3, o = B.flex), "file_count" in B && i(4, r = B.file_count), "disable_click" in B && i(12, T = B.disable_click), "root" in B && i(13, S = B.root), "hidden" in B && i(5, w = B.hidden), "$$scope" in B && i(15, s = B.$$scope);
- }, [
- l,
- _,
- c,
- o,
- r,
- w,
- F,
- P,
- k,
- p,
- E,
- h,
- T,
- S,
- y,
- s,
- n,
- $,
- M,
- m,
- N,
- D,
- X,
- G,
- I
- ];
-}
-class Upload extends SvelteComponent$2 {
- constructor(e) {
- super(), init$2(this, e, instance$2, create_fragment$2, safe_not_equal$2, {
- filetype: 0,
- dragging: 11,
- boundedheight: 1,
- center: 2,
- flex: 3,
- file_count: 4,
- disable_click: 12,
- root: 13,
- hidden: 5,
- open_file_upload: 6,
- load_files: 14
- });
- }
- get open_file_upload() {
- return this.$$.ctx[6];
- }
- get load_files() {
- return this.$$.ctx[14];
- }
-}
-const ModifyUpload_svelte_svelte_type_style_lang = "", {
- SvelteComponent: SvelteComponent$1,
- append: append$1,
- attr: attr$1,
- check_outros: check_outros$1,
- create_component: create_component$1,
- destroy_component: destroy_component$1,
- detach: detach$1,
- element: element$1,
- group_outros: group_outros$1,
- init: init$1,
- insert: insert$1,
- mount_component: mount_component$1,
- safe_not_equal: safe_not_equal$1,
- set_style: set_style$1,
- space: space$1,
- toggle_class,
- transition_in: transition_in$1,
- transition_out: transition_out$1
-} = window.__gradio__svelte__internal, { createEventDispatcher } = window.__gradio__svelte__internal;
-function create_if_block_1$1(t) {
- let e, i;
- return e = new IconButton({
- props: {
- Icon: Edit,
- label: (
- /*i18n*/
- t[3]("common.edit")
- )
- }
- }), e.$on(
- "click",
- /*click_handler*/
- t[5]
- ), {
- c() {
- create_component$1(e.$$.fragment);
- },
- m(n, s) {
- mount_component$1(e, n, s), i = !0;
- },
- p(n, s) {
- const l = {};
- s & /*i18n*/
- 8 && (l.label = /*i18n*/
- n[3]("common.edit")), e.$set(l);
- },
- i(n) {
- i || (transition_in$1(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out$1(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component$1(e, n);
- }
- };
-}
-function create_if_block$1(t) {
- let e, i;
- return e = new IconButton({
- props: {
- Icon: Undo,
- label: (
- /*i18n*/
- t[3]("common.undo")
- )
- }
- }), e.$on(
- "click",
- /*click_handler_1*/
- t[6]
- ), {
- c() {
- create_component$1(e.$$.fragment);
- },
- m(n, s) {
- mount_component$1(e, n, s), i = !0;
- },
- p(n, s) {
- const l = {};
- s & /*i18n*/
- 8 && (l.label = /*i18n*/
- n[3]("common.undo")), e.$set(l);
- },
- i(n) {
- i || (transition_in$1(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out$1(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component$1(e, n);
- }
- };
-}
-function create_fragment$1(t) {
- let e, i, n, s, l, h = (
- /*editable*/
- t[0] && create_if_block_1$1(t)
- ), _ = (
- /*undoable*/
- t[1] && create_if_block$1(t)
- );
- return s = new IconButton({
- props: {
- Icon: Clear,
- label: (
- /*i18n*/
- t[3]("common.clear")
- )
- }
- }), s.$on(
- "click",
- /*click_handler_2*/
- t[7]
- ), {
- c() {
- e = element$1("div"), h && h.c(), i = space$1(), _ && _.c(), n = space$1(), create_component$1(s.$$.fragment), attr$1(e, "class", "svelte-1wj0ocy"), toggle_class(e, "not-absolute", !/*absolute*/
- t[2]), set_style$1(
- e,
- "position",
- /*absolute*/
- t[2] ? "absolute" : "static"
- );
- },
- m(c, o) {
- insert$1(c, e, o), h && h.m(e, null), append$1(e, i), _ && _.m(e, null), append$1(e, n), mount_component$1(s, e, null), l = !0;
- },
- p(c, [o]) {
- /*editable*/
- c[0] ? h ? (h.p(c, o), o & /*editable*/
- 1 && transition_in$1(h, 1)) : (h = create_if_block_1$1(c), h.c(), transition_in$1(h, 1), h.m(e, i)) : h && (group_outros$1(), transition_out$1(h, 1, 1, () => {
- h = null;
- }), check_outros$1()), /*undoable*/
- c[1] ? _ ? (_.p(c, o), o & /*undoable*/
- 2 && transition_in$1(_, 1)) : (_ = create_if_block$1(c), _.c(), transition_in$1(_, 1), _.m(e, n)) : _ && (group_outros$1(), transition_out$1(_, 1, 1, () => {
- _ = null;
- }), check_outros$1());
- const r = {};
- o & /*i18n*/
- 8 && (r.label = /*i18n*/
- c[3]("common.clear")), s.$set(r), (!l || o & /*absolute*/
- 4) && toggle_class(e, "not-absolute", !/*absolute*/
- c[2]), o & /*absolute*/
- 4 && set_style$1(
- e,
- "position",
- /*absolute*/
- c[2] ? "absolute" : "static"
- );
- },
- i(c) {
- l || (transition_in$1(h), transition_in$1(_), transition_in$1(s.$$.fragment, c), l = !0);
- },
- o(c) {
- transition_out$1(h), transition_out$1(_), transition_out$1(s.$$.fragment, c), l = !1;
- },
- d(c) {
- c && detach$1(e), h && h.d(), _ && _.d(), destroy_component$1(s);
- }
- };
-}
-function instance$1(t, e, i) {
- let { editable: n = !1 } = e, { undoable: s = !1 } = e, { absolute: l = !0 } = e, { i18n: h } = e;
- const _ = createEventDispatcher(), c = () => _("edit"), o = () => _("undo"), r = (T) => {
- _("clear"), T.stopPropagation();
- };
- return t.$$set = (T) => {
- "editable" in T && i(0, n = T.editable), "undoable" in T && i(1, s = T.undoable), "absolute" in T && i(2, l = T.absolute), "i18n" in T && i(3, h = T.i18n);
- }, [
- n,
- s,
- l,
- h,
- _,
- c,
- o,
- r
- ];
-}
-class ModifyUpload extends SvelteComponent$1 {
- constructor(e) {
- super(), init$1(this, e, instance$1, create_fragment$1, safe_not_equal$1, {
- editable: 0,
- undoable: 1,
- absolute: 2,
- i18n: 3
- });
- }
-}
-function commonjsRequire(t) {
- throw new Error('Could not dynamically require "' + t + '". Please configure the dynamicRequireTargets or/and ignoreDynamicRequires option of @rollup/plugin-commonjs appropriately for this require call to work.');
-}
-var pdf = { exports: {} };
-const require$$5$1 = {}, __viteBrowserExternal = /* @__PURE__ */ Object.freeze(/* @__PURE__ */ Object.defineProperty({
- __proto__: null,
- default: require$$5$1
-}, Symbol.toStringTag, { value: "Module" })), require$$5 = /* @__PURE__ */ getAugmentedNamespace(__viteBrowserExternal);
-(function(module, exports) {
- (function(e, i) {
- module.exports = e.pdfjsLib = i();
- })(globalThis, () => (
- /******/
- (() => {
- var __webpack_modules__ = [
- ,
- /* 1 */
- /***/
- (t, e) => {
- var Ye;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.VerbosityLevel = e.Util = e.UnknownErrorException = e.UnexpectedResponseException = e.TextRenderingMode = e.RenderingIntentFlag = e.PromiseCapability = e.PermissionFlag = e.PasswordResponses = e.PasswordException = e.PageActionEventType = e.OPS = e.MissingPDFException = e.MAX_IMAGE_SIZE_TO_CACHE = e.LINE_FACTOR = e.LINE_DESCENT_FACTOR = e.InvalidPDFException = e.ImageKind = e.IDENTITY_MATRIX = e.FormatError = e.FeatureTest = e.FONT_IDENTITY_MATRIX = e.DocumentActionEventType = e.CMapCompressionType = e.BaseException = e.BASELINE_FACTOR = e.AnnotationType = e.AnnotationReplyType = e.AnnotationPrefix = e.AnnotationMode = e.AnnotationFlag = e.AnnotationFieldFlag = e.AnnotationEditorType = e.AnnotationEditorPrefix = e.AnnotationEditorParamsType = e.AnnotationBorderStyleType = e.AnnotationActionEventType = e.AbortException = void 0, e.assert = le, e.bytesToString = z, e.createValidAbsoluteUrl = we, e.getModificationDate = xe, e.getUuid = Xe, e.getVerbosityLevel = B, e.info = ee, e.isArrayBuffer = ge, e.isArrayEqual = Ce, e.isNodeJS = void 0, e.normalizeUnicode = ze, e.objectFromMap = ue, e.objectSize = ce, e.setVerbosityLevel = I, e.shadow = be, e.string32 = Q, e.stringToBytes = ae, e.stringToPDFString = _e, e.stringToUTF8String = ie, e.unreachable = q, e.utf8StringToString = se, e.warn = Y;
- const i = typeof process == "object" && process + "" == "[object process]" && !process.versions.nw && !(process.versions.electron && process.type && process.type !== "browser");
- e.isNodeJS = i;
- const n = [1, 0, 0, 1, 0, 0];
- e.IDENTITY_MATRIX = n;
- const s = [1e-3, 0, 0, 1e-3, 0, 0];
- e.FONT_IDENTITY_MATRIX = s;
- const l = 1e7;
- e.MAX_IMAGE_SIZE_TO_CACHE = l;
- const h = 1.35;
- e.LINE_FACTOR = h;
- const _ = 0.35;
- e.LINE_DESCENT_FACTOR = _;
- const c = _ / h;
- e.BASELINE_FACTOR = c;
- const o = {
- ANY: 1,
- DISPLAY: 2,
- PRINT: 4,
- SAVE: 8,
- ANNOTATIONS_FORMS: 16,
- ANNOTATIONS_STORAGE: 32,
- ANNOTATIONS_DISABLE: 64,
- OPLIST: 256
- };
- e.RenderingIntentFlag = o;
- const r = {
- DISABLE: 0,
- ENABLE: 1,
- ENABLE_FORMS: 2,
- ENABLE_STORAGE: 3
- };
- e.AnnotationMode = r;
- const T = "pdfjs_internal_editor_";
- e.AnnotationEditorPrefix = T;
- const S = {
- DISABLE: -1,
- NONE: 0,
- FREETEXT: 3,
- STAMP: 13,
- INK: 15
- };
- e.AnnotationEditorType = S;
- const w = {
- RESIZE: 1,
- CREATE: 2,
- FREETEXT_SIZE: 11,
- FREETEXT_COLOR: 12,
- FREETEXT_OPACITY: 13,
- INK_COLOR: 21,
- INK_THICKNESS: 22,
- INK_OPACITY: 23
- };
- e.AnnotationEditorParamsType = w;
- const C = {
- PRINT: 4,
- MODIFY_CONTENTS: 8,
- COPY: 16,
- MODIFY_ANNOTATIONS: 32,
- FILL_INTERACTIVE_FORMS: 256,
- COPY_FOR_ACCESSIBILITY: 512,
- ASSEMBLE: 1024,
- PRINT_HIGH_QUALITY: 2048
- };
- e.PermissionFlag = C;
- const P = {
- FILL: 0,
- STROKE: 1,
- FILL_STROKE: 2,
- INVISIBLE: 3,
- FILL_ADD_TO_PATH: 4,
- STROKE_ADD_TO_PATH: 5,
- FILL_STROKE_ADD_TO_PATH: 6,
- ADD_TO_PATH: 7,
- FILL_STROKE_MASK: 3,
- ADD_TO_PATH_FLAG: 4
- };
- e.TextRenderingMode = P;
- const b = {
- GRAYSCALE_1BPP: 1,
- RGB_24BPP: 2,
- RGBA_32BPP: 3
- };
- e.ImageKind = b;
- const k = {
- TEXT: 1,
- LINK: 2,
- FREETEXT: 3,
- LINE: 4,
- SQUARE: 5,
- CIRCLE: 6,
- POLYGON: 7,
- POLYLINE: 8,
- HIGHLIGHT: 9,
- UNDERLINE: 10,
- SQUIGGLY: 11,
- STRIKEOUT: 12,
- STAMP: 13,
- CARET: 14,
- INK: 15,
- POPUP: 16,
- FILEATTACHMENT: 17,
- SOUND: 18,
- MOVIE: 19,
- WIDGET: 20,
- SCREEN: 21,
- PRINTERMARK: 22,
- TRAPNET: 23,
- WATERMARK: 24,
- THREED: 25,
- REDACT: 26
- };
- e.AnnotationType = k;
- const F = {
- GROUP: "Group",
- REPLY: "R"
- };
- e.AnnotationReplyType = F;
- const x = {
- INVISIBLE: 1,
- HIDDEN: 2,
- PRINT: 4,
- NOZOOM: 8,
- NOROTATE: 16,
- NOVIEW: 32,
- READONLY: 64,
- LOCKED: 128,
- TOGGLENOVIEW: 256,
- LOCKEDCONTENTS: 512
- };
- e.AnnotationFlag = x;
- const y = {
- READONLY: 1,
- REQUIRED: 2,
- NOEXPORT: 4,
- MULTILINE: 4096,
- PASSWORD: 8192,
- NOTOGGLETOOFF: 16384,
- RADIO: 32768,
- PUSHBUTTON: 65536,
- COMBO: 131072,
- EDIT: 262144,
- SORT: 524288,
- FILESELECT: 1048576,
- MULTISELECT: 2097152,
- DONOTSPELLCHECK: 4194304,
- DONOTSCROLL: 8388608,
- COMB: 16777216,
- RICHTEXT: 33554432,
- RADIOSINUNISON: 33554432,
- COMMITONSELCHANGE: 67108864
- };
- e.AnnotationFieldFlag = y;
- const p = {
- SOLID: 1,
- DASHED: 2,
- BEVELED: 3,
- INSET: 4,
- UNDERLINE: 5
- };
- e.AnnotationBorderStyleType = p;
- const E = {
- E: "Mouse Enter",
- X: "Mouse Exit",
- D: "Mouse Down",
- U: "Mouse Up",
- Fo: "Focus",
- Bl: "Blur",
- PO: "PageOpen",
- PC: "PageClose",
- PV: "PageVisible",
- PI: "PageInvisible",
- K: "Keystroke",
- F: "Format",
- V: "Validate",
- C: "Calculate"
- };
- e.AnnotationActionEventType = E;
- const $ = {
- WC: "WillClose",
- WS: "WillSave",
- DS: "DidSave",
- WP: "WillPrint",
- DP: "DidPrint"
- };
- e.DocumentActionEventType = $;
- const M = {
- O: "PageOpen",
- C: "PageClose"
- };
- e.PageActionEventType = M;
- const m = {
- ERRORS: 0,
- WARNINGS: 1,
- INFOS: 5
- };
- e.VerbosityLevel = m;
- const N = {
- NONE: 0,
- BINARY: 1
- };
- e.CMapCompressionType = N;
- const D = {
- dependency: 1,
- setLineWidth: 2,
- setLineCap: 3,
- setLineJoin: 4,
- setMiterLimit: 5,
- setDash: 6,
- setRenderingIntent: 7,
- setFlatness: 8,
- setGState: 9,
- save: 10,
- restore: 11,
- transform: 12,
- moveTo: 13,
- lineTo: 14,
- curveTo: 15,
- curveTo2: 16,
- curveTo3: 17,
- closePath: 18,
- rectangle: 19,
- stroke: 20,
- closeStroke: 21,
- fill: 22,
- eoFill: 23,
- fillStroke: 24,
- eoFillStroke: 25,
- closeFillStroke: 26,
- closeEOFillStroke: 27,
- endPath: 28,
- clip: 29,
- eoClip: 30,
- beginText: 31,
- endText: 32,
- setCharSpacing: 33,
- setWordSpacing: 34,
- setHScale: 35,
- setLeading: 36,
- setFont: 37,
- setTextRenderingMode: 38,
- setTextRise: 39,
- moveText: 40,
- setLeadingMoveText: 41,
- setTextMatrix: 42,
- nextLine: 43,
- showText: 44,
- showSpacedText: 45,
- nextLineShowText: 46,
- nextLineSetSpacingShowText: 47,
- setCharWidth: 48,
- setCharWidthAndBounds: 49,
- setStrokeColorSpace: 50,
- setFillColorSpace: 51,
- setStrokeColor: 52,
- setStrokeColorN: 53,
- setFillColor: 54,
- setFillColorN: 55,
- setStrokeGray: 56,
- setFillGray: 57,
- setStrokeRGBColor: 58,
- setFillRGBColor: 59,
- setStrokeCMYKColor: 60,
- setFillCMYKColor: 61,
- shadingFill: 62,
- beginInlineImage: 63,
- beginImageData: 64,
- endInlineImage: 65,
- paintXObject: 66,
- markPoint: 67,
- markPointProps: 68,
- beginMarkedContent: 69,
- beginMarkedContentProps: 70,
- endMarkedContent: 71,
- beginCompat: 72,
- endCompat: 73,
- paintFormXObjectBegin: 74,
- paintFormXObjectEnd: 75,
- beginGroup: 76,
- endGroup: 77,
- beginAnnotation: 80,
- endAnnotation: 81,
- paintImageMaskXObject: 83,
- paintImageMaskXObjectGroup: 84,
- paintImageXObject: 85,
- paintInlineImageXObject: 86,
- paintInlineImageXObjectGroup: 87,
- paintImageXObjectRepeat: 88,
- paintImageMaskXObjectRepeat: 89,
- paintSolidColorImageMask: 90,
- constructPath: 91
- };
- e.OPS = D;
- const X = {
- NEED_PASSWORD: 1,
- INCORRECT_PASSWORD: 2
- };
- e.PasswordResponses = X;
- let G = m.WARNINGS;
- function I(de) {
- Number.isInteger(de) && (G = de);
- }
- function B() {
- return G;
- }
- function ee(de) {
- G >= m.INFOS && console.log(`Info: ${de}`);
- }
- function Y(de) {
- G >= m.WARNINGS && console.log(`Warning: ${de}`);
- }
- function q(de) {
- throw new Error(de);
- }
- function le(de, ne) {
- de || q(ne);
- }
- function pe(de) {
- switch (de == null ? void 0 : de.protocol) {
- case "http:":
- case "https:":
- case "ftp:":
- case "mailto:":
- case "tel:":
- return !0;
- default:
- return !1;
- }
- }
- function we(de, ne = null, J = null) {
- if (!de)
- return null;
- try {
- if (J && typeof de == "string") {
- if (J.addDefaultProtocol && de.startsWith("www.")) {
- const Se = de.match(/\./g);
- (Se == null ? void 0 : Se.length) >= 2 && (de = `http://${de}`);
- }
- if (J.tryConvertEncoding)
- try {
- de = ie(de);
- } catch {
- }
- }
- const ve = ne ? new URL(de, ne) : new URL(de);
- if (pe(ve))
- return ve;
- } catch {
- }
- return null;
- }
- function be(de, ne, J, ve = !1) {
- return Object.defineProperty(de, ne, {
- value: J,
- enumerable: !ve,
- configurable: !0,
- writable: !1
- }), J;
- }
- const R = function() {
- function ne(J, ve) {
- this.constructor === ne && q("Cannot initialize BaseException."), this.message = J, this.name = ve;
- }
- return ne.prototype = new Error(), ne.constructor = ne, ne;
- }();
- e.BaseException = R;
- class d extends R {
- constructor(ne, J) {
- super(ne, "PasswordException"), this.code = J;
- }
- }
- e.PasswordException = d;
- class g extends R {
- constructor(ne, J) {
- super(ne, "UnknownErrorException"), this.details = J;
- }
- }
- e.UnknownErrorException = g;
- class f extends R {
- constructor(ne) {
- super(ne, "InvalidPDFException");
- }
- }
- e.InvalidPDFException = f;
- class v extends R {
- constructor(ne) {
- super(ne, "MissingPDFException");
- }
- }
- e.MissingPDFException = v;
- class A extends R {
- constructor(ne, J) {
- super(ne, "UnexpectedResponseException"), this.status = J;
- }
- }
- e.UnexpectedResponseException = A;
- class O extends R {
- constructor(ne) {
- super(ne, "FormatError");
- }
- }
- e.FormatError = O;
- class H extends R {
- constructor(ne) {
- super(ne, "AbortException");
- }
- }
- e.AbortException = H;
- function z(de) {
- (typeof de != "object" || (de == null ? void 0 : de.length) === void 0) && q("Invalid argument for bytesToString");
- const ne = de.length, J = 8192;
- if (ne < J)
- return String.fromCharCode.apply(null, de);
- const ve = [];
- for (let Se = 0; Se < ne; Se += J) {
- const tt = Math.min(Se + J, ne), et = de.subarray(Se, tt);
- ve.push(String.fromCharCode.apply(null, et));
- }
- return ve.join("");
- }
- function ae(de) {
- typeof de != "string" && q("Invalid argument for stringToBytes");
- const ne = de.length, J = new Uint8Array(ne);
- for (let ve = 0; ve < ne; ++ve)
- J[ve] = de.charCodeAt(ve) & 255;
- return J;
- }
- function Q(de) {
- return String.fromCharCode(de >> 24 & 255, de >> 16 & 255, de >> 8 & 255, de & 255);
- }
- function ce(de) {
- return Object.keys(de).length;
- }
- function ue(de) {
- const ne = /* @__PURE__ */ Object.create(null);
- for (const [J, ve] of de)
- ne[J] = ve;
- return ne;
- }
- function me() {
- const de = new Uint8Array(4);
- return de[0] = 1, new Uint32Array(de.buffer, 0, 1)[0] === 1;
- }
- function fe() {
- try {
- return new Function(""), !0;
- } catch {
- return !1;
- }
- }
- class Pe {
- static get isLittleEndian() {
- return be(this, "isLittleEndian", me());
- }
- static get isEvalSupported() {
- return be(this, "isEvalSupported", fe());
- }
- static get isOffscreenCanvasSupported() {
- return be(this, "isOffscreenCanvasSupported", typeof OffscreenCanvas < "u");
- }
- static get platform() {
- return typeof navigator > "u" ? be(this, "platform", {
- isWin: !1,
- isMac: !1
- }) : be(this, "platform", {
- isWin: navigator.platform.includes("Win"),
- isMac: navigator.platform.includes("Mac")
- });
- }
- static get isCSSRoundSupported() {
- var ne, J;
- return be(this, "isCSSRoundSupported", (J = (ne = globalThis.CSS) == null ? void 0 : ne.supports) == null ? void 0 : J.call(ne, "width: round(1.5px, 1px)"));
- }
- }
- e.FeatureTest = Pe;
- const Fe = [...Array(256).keys()].map((de) => de.toString(16).padStart(2, "0"));
- class Ee {
- static makeHexColor(ne, J, ve) {
- return `#${Fe[ne]}${Fe[J]}${Fe[ve]}`;
- }
- static scaleMinMax(ne, J) {
- let ve;
- ne[0] ? (ne[0] < 0 && (ve = J[0], J[0] = J[1], J[1] = ve), J[0] *= ne[0], J[1] *= ne[0], ne[3] < 0 && (ve = J[2], J[2] = J[3], J[3] = ve), J[2] *= ne[3], J[3] *= ne[3]) : (ve = J[0], J[0] = J[2], J[2] = ve, ve = J[1], J[1] = J[3], J[3] = ve, ne[1] < 0 && (ve = J[2], J[2] = J[3], J[3] = ve), J[2] *= ne[1], J[3] *= ne[1], ne[2] < 0 && (ve = J[0], J[0] = J[1], J[1] = ve), J[0] *= ne[2], J[1] *= ne[2]), J[0] += ne[4], J[1] += ne[4], J[2] += ne[5], J[3] += ne[5];
- }
- static transform(ne, J) {
- return [ne[0] * J[0] + ne[2] * J[1], ne[1] * J[0] + ne[3] * J[1], ne[0] * J[2] + ne[2] * J[3], ne[1] * J[2] + ne[3] * J[3], ne[0] * J[4] + ne[2] * J[5] + ne[4], ne[1] * J[4] + ne[3] * J[5] + ne[5]];
- }
- static applyTransform(ne, J) {
- const ve = ne[0] * J[0] + ne[1] * J[2] + J[4], Se = ne[0] * J[1] + ne[1] * J[3] + J[5];
- return [ve, Se];
- }
- static applyInverseTransform(ne, J) {
- const ve = J[0] * J[3] - J[1] * J[2], Se = (ne[0] * J[3] - ne[1] * J[2] + J[2] * J[5] - J[4] * J[3]) / ve, tt = (-ne[0] * J[1] + ne[1] * J[0] + J[4] * J[1] - J[5] * J[0]) / ve;
- return [Se, tt];
- }
- static getAxialAlignedBoundingBox(ne, J) {
- const ve = this.applyTransform(ne, J), Se = this.applyTransform(ne.slice(2, 4), J), tt = this.applyTransform([ne[0], ne[3]], J), et = this.applyTransform([ne[2], ne[1]], J);
- return [Math.min(ve[0], Se[0], tt[0], et[0]), Math.min(ve[1], Se[1], tt[1], et[1]), Math.max(ve[0], Se[0], tt[0], et[0]), Math.max(ve[1], Se[1], tt[1], et[1])];
- }
- static inverseTransform(ne) {
- const J = ne[0] * ne[3] - ne[1] * ne[2];
- return [ne[3] / J, -ne[1] / J, -ne[2] / J, ne[0] / J, (ne[2] * ne[5] - ne[4] * ne[3]) / J, (ne[4] * ne[1] - ne[5] * ne[0]) / J];
- }
- static singularValueDecompose2dScale(ne) {
- const J = [ne[0], ne[2], ne[1], ne[3]], ve = ne[0] * J[0] + ne[1] * J[2], Se = ne[0] * J[1] + ne[1] * J[3], tt = ne[2] * J[0] + ne[3] * J[2], et = ne[2] * J[1] + ne[3] * J[3], te = (ve + et) / 2, Te = Math.sqrt((ve + et) ** 2 - 4 * (ve * et - tt * Se)) / 2, Ne = te + Te || 1, ke = te - Te || 1;
- return [Math.sqrt(Ne), Math.sqrt(ke)];
- }
- static normalizeRect(ne) {
- const J = ne.slice(0);
- return ne[0] > ne[2] && (J[0] = ne[2], J[2] = ne[0]), ne[1] > ne[3] && (J[1] = ne[3], J[3] = ne[1]), J;
- }
- static intersect(ne, J) {
- const ve = Math.max(Math.min(ne[0], ne[2]), Math.min(J[0], J[2])), Se = Math.min(Math.max(ne[0], ne[2]), Math.max(J[0], J[2]));
- if (ve > Se)
- return null;
- const tt = Math.max(Math.min(ne[1], ne[3]), Math.min(J[1], J[3])), et = Math.min(Math.max(ne[1], ne[3]), Math.max(J[1], J[3]));
- return tt > et ? null : [ve, tt, Se, et];
- }
- static bezierBoundingBox(ne, J, ve, Se, tt, et, te, Te) {
- const Ne = [], ke = [[], []];
- let $e, Be, Qe, Ae, Ke, Oe, U, u;
- for (let Z = 0; Z < 2; ++Z) {
- if (Z === 0 ? (Be = 6 * ne - 12 * ve + 6 * tt, $e = -3 * ne + 9 * ve - 9 * tt + 3 * te, Qe = 3 * ve - 3 * ne) : (Be = 6 * J - 12 * Se + 6 * et, $e = -3 * J + 9 * Se - 9 * et + 3 * Te, Qe = 3 * Se - 3 * J), Math.abs($e) < 1e-12) {
- if (Math.abs(Be) < 1e-12)
- continue;
- Ae = -Qe / Be, 0 < Ae && Ae < 1 && Ne.push(Ae);
- continue;
- }
- U = Be * Be - 4 * Qe * $e, u = Math.sqrt(U), !(U < 0) && (Ke = (-Be + u) / (2 * $e), 0 < Ke && Ke < 1 && Ne.push(Ke), Oe = (-Be - u) / (2 * $e), 0 < Oe && Oe < 1 && Ne.push(Oe));
- }
- let L = Ne.length, j;
- const V = L;
- for (; L--; )
- Ae = Ne[L], j = 1 - Ae, ke[0][L] = j * j * j * ne + 3 * j * j * Ae * ve + 3 * j * Ae * Ae * tt + Ae * Ae * Ae * te, ke[1][L] = j * j * j * J + 3 * j * j * Ae * Se + 3 * j * Ae * Ae * et + Ae * Ae * Ae * Te;
- return ke[0][V] = ne, ke[1][V] = J, ke[0][V + 1] = te, ke[1][V + 1] = Te, ke[0].length = ke[1].length = V + 2, [Math.min(...ke[0]), Math.min(...ke[1]), Math.max(...ke[0]), Math.max(...ke[1])];
- }
- }
- e.Util = Ee;
- const De = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 728, 711, 710, 729, 733, 731, 730, 732, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8226, 8224, 8225, 8230, 8212, 8211, 402, 8260, 8249, 8250, 8722, 8240, 8222, 8220, 8221, 8216, 8217, 8218, 8482, 64257, 64258, 321, 338, 352, 376, 381, 305, 322, 339, 353, 382, 0, 8364];
- function _e(de) {
- if (de[0] >= "ï") {
- let J;
- if (de[0] === "þ" && de[1] === "ÿ" ? J = "utf-16be" : de[0] === "ÿ" && de[1] === "þ" ? J = "utf-16le" : de[0] === "ï" && de[1] === "»" && de[2] === "¿" && (J = "utf-8"), J)
- try {
- const ve = new TextDecoder(J, {
- fatal: !0
- }), Se = ae(de);
- return ve.decode(Se);
- } catch (ve) {
- Y(`stringToPDFString: "${ve}".`);
- }
- }
- const ne = [];
- for (let J = 0, ve = de.length; J < ve; J++) {
- const Se = De[de.charCodeAt(J)];
- ne.push(Se ? String.fromCharCode(Se) : de.charAt(J));
- }
- return ne.join("");
- }
- function ie(de) {
- return decodeURIComponent(escape(de));
- }
- function se(de) {
- return unescape(encodeURIComponent(de));
- }
- function ge(de) {
- return typeof de == "object" && (de == null ? void 0 : de.byteLength) !== void 0;
- }
- function Ce(de, ne) {
- if (de.length !== ne.length)
- return !1;
- for (let J = 0, ve = de.length; J < ve; J++)
- if (de[J] !== ne[J])
- return !1;
- return !0;
- }
- function xe(de = /* @__PURE__ */ new Date()) {
- return [de.getUTCFullYear().toString(), (de.getUTCMonth() + 1).toString().padStart(2, "0"), de.getUTCDate().toString().padStart(2, "0"), de.getUTCHours().toString().padStart(2, "0"), de.getUTCMinutes().toString().padStart(2, "0"), de.getUTCSeconds().toString().padStart(2, "0")].join("");
- }
- class Ue {
- constructor() {
- W(this, Ye, !1);
- this.promise = new Promise((ne, J) => {
- this.resolve = (ve) => {
- oe(this, Ye, !0), ne(ve);
- }, this.reject = (ve) => {
- oe(this, Ye, !0), J(ve);
- };
- });
- }
- get settled() {
- return a(this, Ye);
- }
- }
- Ye = new WeakMap(), e.PromiseCapability = Ue;
- let We = null, je = null;
- function ze(de) {
- return We || (We = /([\u00a0\u00b5\u037e\u0eb3\u2000-\u200a\u202f\u2126\ufb00-\ufb04\ufb06\ufb20-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufba1\ufba4-\ufba9\ufbae-\ufbb1\ufbd3-\ufbdc\ufbde-\ufbe7\ufbea-\ufbf8\ufbfc-\ufbfd\ufc00-\ufc5d\ufc64-\ufcf1\ufcf5-\ufd3d\ufd88\ufdf4\ufdfa-\ufdfb\ufe71\ufe77\ufe79\ufe7b\ufe7d]+)|(\ufb05+)/gu, je = /* @__PURE__ */ new Map([["ſt", "ſt"]])), de.replaceAll(We, (ne, J, ve) => J ? J.normalize("NFKC") : je.get(ve));
- }
- function Xe() {
- if (typeof crypto < "u" && typeof (crypto == null ? void 0 : crypto.randomUUID) == "function")
- return crypto.randomUUID();
- const de = new Uint8Array(32);
- if (typeof crypto < "u" && typeof (crypto == null ? void 0 : crypto.getRandomValues) == "function")
- crypto.getRandomValues(de);
- else
- for (let ne = 0; ne < 32; ne++)
- de[ne] = Math.floor(Math.random() * 255);
- return z(de);
- }
- const Ge = "pdfjs_internal_id_";
- e.AnnotationPrefix = Ge;
- },
- /* 2 */
- /***/
- (__unused_webpack_module, exports, __w_pdfjs_require__) => {
- var t, i, n, s, ut, h, wt, c, o, r, T, S, w, C, P, b, Tt, F, x, Bt, p, E;
- Object.defineProperty(exports, "__esModule", {
- value: !0
- }), exports.RenderTask = exports.PDFWorkerUtil = exports.PDFWorker = exports.PDFPageProxy = exports.PDFDocumentProxy = exports.PDFDocumentLoadingTask = exports.PDFDataRangeTransport = exports.LoopbackPort = exports.DefaultStandardFontDataFactory = exports.DefaultFilterFactory = exports.DefaultCanvasFactory = exports.DefaultCMapReaderFactory = void 0, Object.defineProperty(exports, "SVGGraphics", {
- enumerable: !0,
- get: function() {
- return _displaySvg.SVGGraphics;
- }
- }), exports.build = void 0, exports.getDocument = getDocument, exports.version = void 0;
- var _util = __w_pdfjs_require__(1), _annotation_storage = __w_pdfjs_require__(3), _display_utils = __w_pdfjs_require__(6), _font_loader = __w_pdfjs_require__(9), _displayNode_utils = __w_pdfjs_require__(10), _canvas = __w_pdfjs_require__(11), _worker_options = __w_pdfjs_require__(14), _message_handler = __w_pdfjs_require__(15), _metadata = __w_pdfjs_require__(16), _optional_content_config = __w_pdfjs_require__(17), _transport_stream = __w_pdfjs_require__(18), _displayFetch_stream = __w_pdfjs_require__(19), _displayNetwork = __w_pdfjs_require__(22), _displayNode_stream = __w_pdfjs_require__(23), _displaySvg = __w_pdfjs_require__(24), _xfa_text = __w_pdfjs_require__(25);
- const DEFAULT_RANGE_CHUNK_SIZE = 65536, RENDERING_CANCELLED_TIMEOUT = 100, DELAYED_CLEANUP_TIMEOUT = 5e3, DefaultCanvasFactory = _util.isNodeJS ? _displayNode_utils.NodeCanvasFactory : _display_utils.DOMCanvasFactory;
- exports.DefaultCanvasFactory = DefaultCanvasFactory;
- const DefaultCMapReaderFactory = _util.isNodeJS ? _displayNode_utils.NodeCMapReaderFactory : _display_utils.DOMCMapReaderFactory;
- exports.DefaultCMapReaderFactory = DefaultCMapReaderFactory;
- const DefaultFilterFactory = _util.isNodeJS ? _displayNode_utils.NodeFilterFactory : _display_utils.DOMFilterFactory;
- exports.DefaultFilterFactory = DefaultFilterFactory;
- const DefaultStandardFontDataFactory = _util.isNodeJS ? _displayNode_utils.NodeStandardFontDataFactory : _display_utils.DOMStandardFontDataFactory;
- exports.DefaultStandardFontDataFactory = DefaultStandardFontDataFactory;
- function getDocument(M) {
- if (typeof M == "string" || M instanceof URL ? M = {
- url: M
- } : (0, _util.isArrayBuffer)(M) && (M = {
- data: M
- }), typeof M != "object")
- throw new Error("Invalid parameter in getDocument, need parameter object.");
- if (!M.url && !M.data && !M.range)
- throw new Error("Invalid parameter object: need either .data, .range or .url");
- const m = new PDFDocumentLoadingTask(), {
- docId: N
- } = m, D = M.url ? getUrlProp(M.url) : null, X = M.data ? getDataProp(M.data) : null, G = M.httpHeaders || null, I = M.withCredentials === !0, B = M.password ?? null, ee = M.range instanceof PDFDataRangeTransport ? M.range : null, Y = Number.isInteger(M.rangeChunkSize) && M.rangeChunkSize > 0 ? M.rangeChunkSize : DEFAULT_RANGE_CHUNK_SIZE;
- let q = M.worker instanceof PDFWorker ? M.worker : null;
- const le = M.verbosity, pe = typeof M.docBaseUrl == "string" && !(0, _display_utils.isDataScheme)(M.docBaseUrl) ? M.docBaseUrl : null, we = typeof M.cMapUrl == "string" ? M.cMapUrl : null, be = M.cMapPacked !== !1, R = M.CMapReaderFactory || DefaultCMapReaderFactory, d = typeof M.standardFontDataUrl == "string" ? M.standardFontDataUrl : null, g = M.StandardFontDataFactory || DefaultStandardFontDataFactory, f = M.stopAtErrors !== !0, v = Number.isInteger(M.maxImageSize) && M.maxImageSize > -1 ? M.maxImageSize : -1, A = M.isEvalSupported !== !1, O = typeof M.isOffscreenCanvasSupported == "boolean" ? M.isOffscreenCanvasSupported : !_util.isNodeJS, H = Number.isInteger(M.canvasMaxAreaInBytes) ? M.canvasMaxAreaInBytes : -1, z = typeof M.disableFontFace == "boolean" ? M.disableFontFace : _util.isNodeJS, ae = M.fontExtraProperties === !0, Q = M.enableXfa === !0, ce = M.ownerDocument || globalThis.document, ue = M.disableRange === !0, me = M.disableStream === !0, fe = M.disableAutoFetch === !0, Pe = M.pdfBug === !0, Fe = ee ? ee.length : M.length ?? NaN, Ee = typeof M.useSystemFonts == "boolean" ? M.useSystemFonts : !_util.isNodeJS && !z, De = typeof M.useWorkerFetch == "boolean" ? M.useWorkerFetch : R === _display_utils.DOMCMapReaderFactory && g === _display_utils.DOMStandardFontDataFactory && we && d && (0, _display_utils.isValidFetchUrl)(we, document.baseURI) && (0, _display_utils.isValidFetchUrl)(d, document.baseURI), _e = M.canvasFactory || new DefaultCanvasFactory({
- ownerDocument: ce
- }), ie = M.filterFactory || new DefaultFilterFactory({
- docId: N,
- ownerDocument: ce
- }), se = null;
- (0, _util.setVerbosityLevel)(le);
- const ge = {
- canvasFactory: _e,
- filterFactory: ie
- };
- if (De || (ge.cMapReaderFactory = new R({
- baseUrl: we,
- isCompressed: be
- }), ge.standardFontDataFactory = new g({
- baseUrl: d
- })), !q) {
- const Ue = {
- verbosity: le,
- port: _worker_options.GlobalWorkerOptions.workerPort
- };
- q = Ue.port ? PDFWorker.fromPort(Ue) : new PDFWorker(Ue), m._worker = q;
- }
- const Ce = {
- docId: N,
- apiVersion: "3.11.174",
- data: X,
- password: B,
- disableAutoFetch: fe,
- rangeChunkSize: Y,
- length: Fe,
- docBaseUrl: pe,
- enableXfa: Q,
- evaluatorOptions: {
- maxImageSize: v,
- disableFontFace: z,
- ignoreErrors: f,
- isEvalSupported: A,
- isOffscreenCanvasSupported: O,
- canvasMaxAreaInBytes: H,
- fontExtraProperties: ae,
- useSystemFonts: Ee,
- cMapUrl: De ? we : null,
- standardFontDataUrl: De ? d : null
- }
- }, xe = {
- ignoreErrors: f,
- isEvalSupported: A,
- disableFontFace: z,
- fontExtraProperties: ae,
- enableXfa: Q,
- ownerDocument: ce,
- disableAutoFetch: fe,
- pdfBug: Pe,
- styleElement: se
- };
- return q.promise.then(function() {
- if (m.destroyed)
- throw new Error("Loading aborted");
- const Ue = _fetchDocument(q, Ce), We = new Promise(function(je) {
- let ze;
- ee ? ze = new _transport_stream.PDFDataTransportStream({
- length: Fe,
- initialData: ee.initialData,
- progressiveDone: ee.progressiveDone,
- contentDispositionFilename: ee.contentDispositionFilename,
- disableRange: ue,
- disableStream: me
- }, ee) : X || (ze = ((Ge) => _util.isNodeJS ? new _displayNode_stream.PDFNodeStream(Ge) : (0, _display_utils.isValidFetchUrl)(Ge.url) ? new _displayFetch_stream.PDFFetchStream(Ge) : new _displayNetwork.PDFNetworkStream(Ge))({
- url: D,
- length: Fe,
- httpHeaders: G,
- withCredentials: I,
- rangeChunkSize: Y,
- disableRange: ue,
- disableStream: me
- })), je(ze);
- });
- return Promise.all([Ue, We]).then(function([je, ze]) {
- if (m.destroyed)
- throw new Error("Loading aborted");
- const Xe = new _message_handler.MessageHandler(N, je, q.port), Ge = new WorkerTransport(Xe, m, ze, xe, ge);
- m._transport = Ge, Xe.send("Ready", null);
- });
- }).catch(m._capability.reject), m;
- }
- async function _fetchDocument(M, m) {
- if (M.destroyed)
- throw new Error("Worker was destroyed");
- const N = await M.messageHandler.sendWithPromise("GetDocRequest", m, m.data ? [m.data.buffer] : null);
- if (M.destroyed)
- throw new Error("Worker was destroyed");
- return N;
- }
- function getUrlProp(M) {
- if (M instanceof URL)
- return M.href;
- try {
- return new URL(M, window.location).href;
- } catch {
- if (_util.isNodeJS && typeof M == "string")
- return M;
- }
- throw new Error("Invalid PDF url data: either string or URL-object is expected in the url property.");
- }
- function getDataProp(M) {
- if (_util.isNodeJS && typeof Buffer < "u" && M instanceof Buffer)
- throw new Error("Please provide binary data as `Uint8Array`, rather than `Buffer`.");
- if (M instanceof Uint8Array && M.byteLength === M.buffer.byteLength)
- return M;
- if (typeof M == "string")
- return (0, _util.stringToBytes)(M);
- if (typeof M == "object" && !isNaN(M == null ? void 0 : M.length) || (0, _util.isArrayBuffer)(M))
- return new Uint8Array(M);
- throw new Error("Invalid PDF binary data: either TypedArray, string, or array-like object is expected in the data property.");
- }
- const e = class e {
- constructor() {
- this._capability = new _util.PromiseCapability(), this._transport = null, this._worker = null, this.docId = `d${_t(e, t)._++}`, this.destroyed = !1, this.onPassword = null, this.onProgress = null;
- }
- get promise() {
- return this._capability.promise;
- }
- async destroy() {
- var m, N, D;
- this.destroyed = !0;
- try {
- (m = this._worker) != null && m.port && (this._worker._pendingDestroy = !0), await ((N = this._transport) == null ? void 0 : N.destroy());
- } catch (X) {
- throw (D = this._worker) != null && D.port && delete this._worker._pendingDestroy, X;
- }
- this._transport = null, this._worker && (this._worker.destroy(), this._worker = null);
- }
- };
- t = new WeakMap(), W(e, t, 0);
- let PDFDocumentLoadingTask = e;
- exports.PDFDocumentLoadingTask = PDFDocumentLoadingTask;
- class PDFDataRangeTransport {
- constructor(m, N, D = !1, X = null) {
- this.length = m, this.initialData = N, this.progressiveDone = D, this.contentDispositionFilename = X, this._rangeListeners = [], this._progressListeners = [], this._progressiveReadListeners = [], this._progressiveDoneListeners = [], this._readyCapability = new _util.PromiseCapability();
- }
- addRangeListener(m) {
- this._rangeListeners.push(m);
- }
- addProgressListener(m) {
- this._progressListeners.push(m);
- }
- addProgressiveReadListener(m) {
- this._progressiveReadListeners.push(m);
- }
- addProgressiveDoneListener(m) {
- this._progressiveDoneListeners.push(m);
- }
- onDataRange(m, N) {
- for (const D of this._rangeListeners)
- D(m, N);
- }
- onDataProgress(m, N) {
- this._readyCapability.promise.then(() => {
- for (const D of this._progressListeners)
- D(m, N);
- });
- }
- onDataProgressiveRead(m) {
- this._readyCapability.promise.then(() => {
- for (const N of this._progressiveReadListeners)
- N(m);
- });
- }
- onDataProgressiveDone() {
- this._readyCapability.promise.then(() => {
- for (const m of this._progressiveDoneListeners)
- m();
- });
- }
- transportReady() {
- this._readyCapability.resolve();
- }
- requestDataRange(m, N) {
- (0, _util.unreachable)("Abstract method PDFDataRangeTransport.requestDataRange");
- }
- abort() {
- }
- }
- exports.PDFDataRangeTransport = PDFDataRangeTransport;
- class PDFDocumentProxy {
- constructor(m, N) {
- this._pdfInfo = m, this._transport = N, Object.defineProperty(this, "getJavaScript", {
- value: () => ((0, _display_utils.deprecated)("`PDFDocumentProxy.getJavaScript`, please use `PDFDocumentProxy.getJSActions` instead."), this.getJSActions().then((D) => {
- if (!D)
- return D;
- const X = [];
- for (const G in D)
- X.push(...D[G]);
- return X;
- }))
- });
- }
- get annotationStorage() {
- return this._transport.annotationStorage;
- }
- get filterFactory() {
- return this._transport.filterFactory;
- }
- get numPages() {
- return this._pdfInfo.numPages;
- }
- get fingerprints() {
- return this._pdfInfo.fingerprints;
- }
- get isPureXfa() {
- return (0, _util.shadow)(this, "isPureXfa", !!this._transport._htmlForXfa);
- }
- get allXfaHtml() {
- return this._transport._htmlForXfa;
- }
- getPage(m) {
- return this._transport.getPage(m);
- }
- getPageIndex(m) {
- return this._transport.getPageIndex(m);
- }
- getDestinations() {
- return this._transport.getDestinations();
- }
- getDestination(m) {
- return this._transport.getDestination(m);
- }
- getPageLabels() {
- return this._transport.getPageLabels();
- }
- getPageLayout() {
- return this._transport.getPageLayout();
- }
- getPageMode() {
- return this._transport.getPageMode();
- }
- getViewerPreferences() {
- return this._transport.getViewerPreferences();
- }
- getOpenAction() {
- return this._transport.getOpenAction();
- }
- getAttachments() {
- return this._transport.getAttachments();
- }
- getJSActions() {
- return this._transport.getDocJSActions();
- }
- getOutline() {
- return this._transport.getOutline();
- }
- getOptionalContentConfig() {
- return this._transport.getOptionalContentConfig();
- }
- getPermissions() {
- return this._transport.getPermissions();
- }
- getMetadata() {
- return this._transport.getMetadata();
- }
- getMarkInfo() {
- return this._transport.getMarkInfo();
- }
- getData() {
- return this._transport.getData();
- }
- saveDocument() {
- return this._transport.saveDocument();
- }
- getDownloadInfo() {
- return this._transport.downloadInfoCapability.promise;
- }
- cleanup(m = !1) {
- return this._transport.startCleanup(m || this.isPureXfa);
- }
- destroy() {
- return this.loadingTask.destroy();
- }
- get loadingParams() {
- return this._transport.loadingParams;
- }
- get loadingTask() {
- return this._transport.loadingTask;
- }
- getFieldObjects() {
- return this._transport.getFieldObjects();
- }
- hasJSActions() {
- return this._transport.hasJSActions();
- }
- getCalculationOrderIds() {
- return this._transport.getCalculationOrderIds();
- }
- }
- exports.PDFDocumentProxy = PDFDocumentProxy;
- class PDFPageProxy {
- constructor(m, N, D, X = !1) {
- W(this, s);
- W(this, h);
- W(this, i, null);
- W(this, n, !1);
- this._pageIndex = m, this._pageInfo = N, this._transport = D, this._stats = X ? new _display_utils.StatTimer() : null, this._pdfBug = X, this.commonObjs = D.commonObjs, this.objs = new PDFObjects(), this._maybeCleanupAfterRender = !1, this._intentStates = /* @__PURE__ */ new Map(), this.destroyed = !1;
- }
- get pageNumber() {
- return this._pageIndex + 1;
- }
- get rotate() {
- return this._pageInfo.rotate;
- }
- get ref() {
- return this._pageInfo.ref;
- }
- get userUnit() {
- return this._pageInfo.userUnit;
- }
- get view() {
- return this._pageInfo.view;
- }
- getViewport({
- scale: m,
- rotation: N = this.rotate,
- offsetX: D = 0,
- offsetY: X = 0,
- dontFlip: G = !1
- } = {}) {
- return new _display_utils.PageViewport({
- viewBox: this.view,
- scale: m,
- rotation: N,
- offsetX: D,
- offsetY: X,
- dontFlip: G
- });
- }
- getAnnotations({
- intent: m = "display"
- } = {}) {
- const N = this._transport.getRenderingIntent(m);
- return this._transport.getAnnotations(this._pageIndex, N.renderingIntent);
- }
- getJSActions() {
- return this._transport.getPageJSActions(this._pageIndex);
- }
- get filterFactory() {
- return this._transport.filterFactory;
- }
- get isPureXfa() {
- return (0, _util.shadow)(this, "isPureXfa", !!this._transport._htmlForXfa);
- }
- async getXfa() {
- var m;
- return ((m = this._transport._htmlForXfa) == null ? void 0 : m.children[this._pageIndex]) || null;
- }
- render({
- canvasContext: m,
- viewport: N,
- intent: D = "display",
- annotationMode: X = _util.AnnotationMode.ENABLE,
- transform: G = null,
- background: I = null,
- optionalContentConfigPromise: B = null,
- annotationCanvasMap: ee = null,
- pageColors: Y = null,
- printAnnotationStorage: q = null
- }) {
- var g, f;
- (g = this._stats) == null || g.time("Overall");
- const le = this._transport.getRenderingIntent(D, X, q);
- oe(this, n, !1), K(this, h, wt).call(this), B || (B = this._transport.getOptionalContentConfig());
- let pe = this._intentStates.get(le.cacheKey);
- pe || (pe = /* @__PURE__ */ Object.create(null), this._intentStates.set(le.cacheKey, pe)), pe.streamReaderCancelTimeout && (clearTimeout(pe.streamReaderCancelTimeout), pe.streamReaderCancelTimeout = null);
- const we = !!(le.renderingIntent & _util.RenderingIntentFlag.PRINT);
- pe.displayReadyCapability || (pe.displayReadyCapability = new _util.PromiseCapability(), pe.operatorList = {
- fnArray: [],
- argsArray: [],
- lastChunk: !1,
- separateAnnots: null
- }, (f = this._stats) == null || f.time("Page Request"), this._pumpOperatorList(le));
- const be = (v) => {
- var A, O;
- pe.renderTasks.delete(R), (this._maybeCleanupAfterRender || we) && oe(this, n, !0), K(this, s, ut).call(this, !we), v ? (R.capability.reject(v), this._abortOperatorList({
- intentState: pe,
- reason: v instanceof Error ? v : new Error(v)
- })) : R.capability.resolve(), (A = this._stats) == null || A.timeEnd("Rendering"), (O = this._stats) == null || O.timeEnd("Overall");
- }, R = new InternalRenderTask({
- callback: be,
- params: {
- canvasContext: m,
- viewport: N,
- transform: G,
- background: I
- },
- objs: this.objs,
- commonObjs: this.commonObjs,
- annotationCanvasMap: ee,
- operatorList: pe.operatorList,
- pageIndex: this._pageIndex,
- canvasFactory: this._transport.canvasFactory,
- filterFactory: this._transport.filterFactory,
- useRequestAnimationFrame: !we,
- pdfBug: this._pdfBug,
- pageColors: Y
- });
- (pe.renderTasks || (pe.renderTasks = /* @__PURE__ */ new Set())).add(R);
- const d = R.task;
- return Promise.all([pe.displayReadyCapability.promise, B]).then(([v, A]) => {
- var O;
- if (this.destroyed) {
- be();
- return;
- }
- (O = this._stats) == null || O.time("Rendering"), R.initializeGraphics({
- transparency: v,
- optionalContentConfig: A
- }), R.operatorListChanged();
- }).catch(be), d;
- }
- getOperatorList({
- intent: m = "display",
- annotationMode: N = _util.AnnotationMode.ENABLE,
- printAnnotationStorage: D = null
- } = {}) {
- var ee;
- function X() {
- I.operatorList.lastChunk && (I.opListReadCapability.resolve(I.operatorList), I.renderTasks.delete(B));
- }
- const G = this._transport.getRenderingIntent(m, N, D, !0);
- let I = this._intentStates.get(G.cacheKey);
- I || (I = /* @__PURE__ */ Object.create(null), this._intentStates.set(G.cacheKey, I));
- let B;
- return I.opListReadCapability || (B = /* @__PURE__ */ Object.create(null), B.operatorListChanged = X, I.opListReadCapability = new _util.PromiseCapability(), (I.renderTasks || (I.renderTasks = /* @__PURE__ */ new Set())).add(B), I.operatorList = {
- fnArray: [],
- argsArray: [],
- lastChunk: !1,
- separateAnnots: null
- }, (ee = this._stats) == null || ee.time("Page Request"), this._pumpOperatorList(G)), I.opListReadCapability.promise;
- }
- streamTextContent({
- includeMarkedContent: m = !1,
- disableNormalization: N = !1
- } = {}) {
- return this._transport.messageHandler.sendWithStream("GetTextContent", {
- pageIndex: this._pageIndex,
- includeMarkedContent: m === !0,
- disableNormalization: N === !0
- }, {
- highWaterMark: 100,
- size(X) {
- return X.items.length;
- }
- });
- }
- getTextContent(m = {}) {
- if (this._transport._htmlForXfa)
- return this.getXfa().then((D) => _xfa_text.XfaText.textContent(D));
- const N = this.streamTextContent(m);
- return new Promise(function(D, X) {
- function G() {
- I.read().then(function({
- value: ee,
- done: Y
- }) {
- if (Y) {
- D(B);
- return;
- }
- Object.assign(B.styles, ee.styles), B.items.push(...ee.items), G();
- }, X);
- }
- const I = N.getReader(), B = {
- items: [],
- styles: /* @__PURE__ */ Object.create(null)
- };
- G();
- });
- }
- getStructTree() {
- return this._transport.getStructTree(this._pageIndex);
- }
- _destroy() {
- this.destroyed = !0;
- const m = [];
- for (const N of this._intentStates.values())
- if (this._abortOperatorList({
- intentState: N,
- reason: new Error("Page was destroyed."),
- force: !0
- }), !N.opListReadCapability)
- for (const D of N.renderTasks)
- m.push(D.completed), D.cancel();
- return this.objs.clear(), oe(this, n, !1), K(this, h, wt).call(this), Promise.all(m);
- }
- cleanup(m = !1) {
- oe(this, n, !0);
- const N = K(this, s, ut).call(this, !1);
- return m && N && this._stats && (this._stats = new _display_utils.StatTimer()), N;
- }
- _startRenderPage(m, N) {
- var X, G;
- const D = this._intentStates.get(N);
- D && ((X = this._stats) == null || X.timeEnd("Page Request"), (G = D.displayReadyCapability) == null || G.resolve(m));
- }
- _renderPageChunk(m, N) {
- for (let D = 0, X = m.length; D < X; D++)
- N.operatorList.fnArray.push(m.fnArray[D]), N.operatorList.argsArray.push(m.argsArray[D]);
- N.operatorList.lastChunk = m.lastChunk, N.operatorList.separateAnnots = m.separateAnnots;
- for (const D of N.renderTasks)
- D.operatorListChanged();
- m.lastChunk && K(this, s, ut).call(this, !0);
- }
- _pumpOperatorList({
- renderingIntent: m,
- cacheKey: N,
- annotationStorageSerializable: D
- }) {
- const {
- map: X,
- transfers: G
- } = D, B = this._transport.messageHandler.sendWithStream("GetOperatorList", {
- pageIndex: this._pageIndex,
- intent: m,
- cacheKey: N,
- annotationStorage: X
- }, G).getReader(), ee = this._intentStates.get(N);
- ee.streamReader = B;
- const Y = () => {
- B.read().then(({
- value: q,
- done: le
- }) => {
- if (le) {
- ee.streamReader = null;
- return;
- }
- this._transport.destroyed || (this._renderPageChunk(q, ee), Y());
- }, (q) => {
- if (ee.streamReader = null, !this._transport.destroyed) {
- if (ee.operatorList) {
- ee.operatorList.lastChunk = !0;
- for (const le of ee.renderTasks)
- le.operatorListChanged();
- K(this, s, ut).call(this, !0);
- }
- if (ee.displayReadyCapability)
- ee.displayReadyCapability.reject(q);
- else if (ee.opListReadCapability)
- ee.opListReadCapability.reject(q);
- else
- throw q;
- }
- });
- };
- Y();
- }
- _abortOperatorList({
- intentState: m,
- reason: N,
- force: D = !1
- }) {
- if (m.streamReader) {
- if (m.streamReaderCancelTimeout && (clearTimeout(m.streamReaderCancelTimeout), m.streamReaderCancelTimeout = null), !D) {
- if (m.renderTasks.size > 0)
- return;
- if (N instanceof _display_utils.RenderingCancelledException) {
- let X = RENDERING_CANCELLED_TIMEOUT;
- N.extraDelay > 0 && N.extraDelay < 1e3 && (X += N.extraDelay), m.streamReaderCancelTimeout = setTimeout(() => {
- m.streamReaderCancelTimeout = null, this._abortOperatorList({
- intentState: m,
- reason: N,
- force: !0
- });
- }, X);
- return;
- }
- }
- if (m.streamReader.cancel(new _util.AbortException(N.message)).catch(() => {
- }), m.streamReader = null, !this._transport.destroyed) {
- for (const [X, G] of this._intentStates)
- if (G === m) {
- this._intentStates.delete(X);
- break;
- }
- this.cleanup();
- }
- }
- }
- get stats() {
- return this._stats;
- }
- }
- i = new WeakMap(), n = new WeakMap(), s = new WeakSet(), ut = function(m = !1) {
- if (K(this, h, wt).call(this), !a(this, n) || this.destroyed)
- return !1;
- if (m)
- return oe(this, i, setTimeout(() => {
- oe(this, i, null), K(this, s, ut).call(this, !1);
- }, DELAYED_CLEANUP_TIMEOUT)), !1;
- for (const {
- renderTasks: N,
- operatorList: D
- } of this._intentStates.values())
- if (N.size > 0 || !D.lastChunk)
- return !1;
- return this._intentStates.clear(), this.objs.clear(), oe(this, n, !1), !0;
- }, h = new WeakSet(), wt = function() {
- a(this, i) && (clearTimeout(a(this, i)), oe(this, i, null));
- }, exports.PDFPageProxy = PDFPageProxy;
- class LoopbackPort {
- constructor() {
- W(this, c, /* @__PURE__ */ new Set());
- W(this, o, Promise.resolve());
- }
- postMessage(m, N) {
- const D = {
- data: structuredClone(m, N ? {
- transfer: N
- } : null)
- };
- a(this, o).then(() => {
- for (const X of a(this, c))
- X.call(this, D);
- });
- }
- addEventListener(m, N) {
- a(this, c).add(N);
- }
- removeEventListener(m, N) {
- a(this, c).delete(N);
- }
- terminate() {
- a(this, c).clear();
- }
- }
- c = new WeakMap(), o = new WeakMap(), exports.LoopbackPort = LoopbackPort;
- const PDFWorkerUtil = {
- isWorkerDisabled: !1,
- fallbackWorkerSrc: null,
- fakeWorkerId: 0
- };
- exports.PDFWorkerUtil = PDFWorkerUtil;
- {
- if (_util.isNodeJS && typeof commonjsRequire == "function")
- PDFWorkerUtil.isWorkerDisabled = !0, PDFWorkerUtil.fallbackWorkerSrc = "./pdf.worker.js";
- else if (typeof document == "object") {
- const M = (r = document == null ? void 0 : document.currentScript) == null ? void 0 : r.src;
- M && (PDFWorkerUtil.fallbackWorkerSrc = M.replace(/(\.(?:min\.)?js)(\?.*)?$/i, ".worker$1$2"));
- }
- PDFWorkerUtil.isSameOrigin = function(M, m) {
- let N;
- try {
- if (N = new URL(M), !N.origin || N.origin === "null")
- return !1;
- } catch {
- return !1;
- }
- const D = new URL(m, N);
- return N.origin === D.origin;
- }, PDFWorkerUtil.createCDNWrapper = function(M) {
- const m = `importScripts("${M}");`;
- return URL.createObjectURL(new Blob([m]));
- };
- }
- const _PDFWorker = class _PDFWorker {
- constructor({
- name: M = null,
- port: m = null,
- verbosity: N = (0, _util.getVerbosityLevel)()
- } = {}) {
- var D;
- if (this.name = M, this.destroyed = !1, this.verbosity = N, this._readyCapability = new _util.PromiseCapability(), this._port = null, this._webWorker = null, this._messageHandler = null, m) {
- if ((D = a(_PDFWorker, T)) != null && D.has(m))
- throw new Error("Cannot use more than one PDFWorker per port.");
- (a(_PDFWorker, T) || oe(_PDFWorker, T, /* @__PURE__ */ new WeakMap())).set(m, this), this._initializeFromPort(m);
- return;
- }
- this._initialize();
- }
- get promise() {
- return this._readyCapability.promise;
- }
- get port() {
- return this._port;
- }
- get messageHandler() {
- return this._messageHandler;
- }
- _initializeFromPort(M) {
- this._port = M, this._messageHandler = new _message_handler.MessageHandler("main", "worker", M), this._messageHandler.on("ready", function() {
- }), this._readyCapability.resolve(), this._messageHandler.send("configure", {
- verbosity: this.verbosity
- });
- }
- _initialize() {
- if (!PDFWorkerUtil.isWorkerDisabled && !_PDFWorker._mainThreadWorkerMessageHandler) {
- let {
- workerSrc: M
- } = _PDFWorker;
- try {
- PDFWorkerUtil.isSameOrigin(window.location.href, M) || (M = PDFWorkerUtil.createCDNWrapper(new URL(M, window.location).href));
- const m = new Worker(M), N = new _message_handler.MessageHandler("main", "worker", m), D = () => {
- m.removeEventListener("error", X), N.destroy(), m.terminate(), this.destroyed ? this._readyCapability.reject(new Error("Worker was destroyed")) : this._setupFakeWorker();
- }, X = () => {
- this._webWorker || D();
- };
- m.addEventListener("error", X), N.on("test", (I) => {
- if (m.removeEventListener("error", X), this.destroyed) {
- D();
- return;
- }
- I ? (this._messageHandler = N, this._port = m, this._webWorker = m, this._readyCapability.resolve(), N.send("configure", {
- verbosity: this.verbosity
- })) : (this._setupFakeWorker(), N.destroy(), m.terminate());
- }), N.on("ready", (I) => {
- if (m.removeEventListener("error", X), this.destroyed) {
- D();
- return;
- }
- try {
- G();
- } catch {
- this._setupFakeWorker();
- }
- });
- const G = () => {
- const I = new Uint8Array();
- N.send("test", I, [I.buffer]);
- };
- G();
- return;
- } catch {
- (0, _util.info)("The worker has been disabled.");
- }
- }
- this._setupFakeWorker();
- }
- _setupFakeWorker() {
- PDFWorkerUtil.isWorkerDisabled || ((0, _util.warn)("Setting up fake worker."), PDFWorkerUtil.isWorkerDisabled = !0), _PDFWorker._setupFakeWorkerGlobal.then((M) => {
- if (this.destroyed) {
- this._readyCapability.reject(new Error("Worker was destroyed"));
- return;
- }
- const m = new LoopbackPort();
- this._port = m;
- const N = `fake${PDFWorkerUtil.fakeWorkerId++}`, D = new _message_handler.MessageHandler(N + "_worker", N, m);
- M.setup(D, m);
- const X = new _message_handler.MessageHandler(N, N + "_worker", m);
- this._messageHandler = X, this._readyCapability.resolve(), X.send("configure", {
- verbosity: this.verbosity
- });
- }).catch((M) => {
- this._readyCapability.reject(new Error(`Setting up fake worker failed: "${M.message}".`));
- });
- }
- destroy() {
- var M;
- this.destroyed = !0, this._webWorker && (this._webWorker.terminate(), this._webWorker = null), (M = a(_PDFWorker, T)) == null || M.delete(this._port), this._port = null, this._messageHandler && (this._messageHandler.destroy(), this._messageHandler = null);
- }
- static fromPort(M) {
- var N;
- if (!(M != null && M.port))
- throw new Error("PDFWorker.fromPort - invalid method signature.");
- const m = (N = a(this, T)) == null ? void 0 : N.get(M.port);
- if (m) {
- if (m._pendingDestroy)
- throw new Error("PDFWorker.fromPort - the worker is being destroyed.\nPlease remember to await `PDFDocumentLoadingTask.destroy()`-calls.");
- return m;
- }
- return new _PDFWorker(M);
- }
- static get workerSrc() {
- if (_worker_options.GlobalWorkerOptions.workerSrc)
- return _worker_options.GlobalWorkerOptions.workerSrc;
- if (PDFWorkerUtil.fallbackWorkerSrc !== null)
- return _util.isNodeJS || (0, _display_utils.deprecated)('No "GlobalWorkerOptions.workerSrc" specified.'), PDFWorkerUtil.fallbackWorkerSrc;
- throw new Error('No "GlobalWorkerOptions.workerSrc" specified.');
- }
- static get _mainThreadWorkerMessageHandler() {
- var M;
- try {
- return ((M = globalThis.pdfjsWorker) == null ? void 0 : M.WorkerMessageHandler) || null;
- } catch {
- return null;
- }
- }
- static get _setupFakeWorkerGlobal() {
- const loader = async () => {
- const mainWorkerMessageHandler = this._mainThreadWorkerMessageHandler;
- if (mainWorkerMessageHandler)
- return mainWorkerMessageHandler;
- if (_util.isNodeJS && typeof commonjsRequire == "function") {
- const worker = eval("require")(this.workerSrc);
- return worker.WorkerMessageHandler;
- }
- return await (0, _display_utils.loadScript)(this.workerSrc), window.pdfjsWorker.WorkerMessageHandler;
- };
- return (0, _util.shadow)(this, "_setupFakeWorkerGlobal", loader());
- }
- };
- T = new WeakMap(), W(_PDFWorker, T, void 0);
- let PDFWorker = _PDFWorker;
- exports.PDFWorker = PDFWorker;
- class WorkerTransport {
- constructor(m, N, D, X, G) {
- W(this, b);
- W(this, S, /* @__PURE__ */ new Map());
- W(this, w, /* @__PURE__ */ new Map());
- W(this, C, /* @__PURE__ */ new Map());
- W(this, P, null);
- this.messageHandler = m, this.loadingTask = N, this.commonObjs = new PDFObjects(), this.fontLoader = new _font_loader.FontLoader({
- ownerDocument: X.ownerDocument,
- styleElement: X.styleElement
- }), this._params = X, this.canvasFactory = G.canvasFactory, this.filterFactory = G.filterFactory, this.cMapReaderFactory = G.cMapReaderFactory, this.standardFontDataFactory = G.standardFontDataFactory, this.destroyed = !1, this.destroyCapability = null, this._networkStream = D, this._fullReader = null, this._lastProgress = null, this.downloadInfoCapability = new _util.PromiseCapability(), this.setupMessageHandler();
- }
- get annotationStorage() {
- return (0, _util.shadow)(this, "annotationStorage", new _annotation_storage.AnnotationStorage());
- }
- getRenderingIntent(m, N = _util.AnnotationMode.ENABLE, D = null, X = !1) {
- let G = _util.RenderingIntentFlag.DISPLAY, I = _annotation_storage.SerializableEmpty;
- switch (m) {
- case "any":
- G = _util.RenderingIntentFlag.ANY;
- break;
- case "display":
- break;
- case "print":
- G = _util.RenderingIntentFlag.PRINT;
- break;
- default:
- (0, _util.warn)(`getRenderingIntent - invalid intent: ${m}`);
- }
- switch (N) {
- case _util.AnnotationMode.DISABLE:
- G += _util.RenderingIntentFlag.ANNOTATIONS_DISABLE;
- break;
- case _util.AnnotationMode.ENABLE:
- break;
- case _util.AnnotationMode.ENABLE_FORMS:
- G += _util.RenderingIntentFlag.ANNOTATIONS_FORMS;
- break;
- case _util.AnnotationMode.ENABLE_STORAGE:
- G += _util.RenderingIntentFlag.ANNOTATIONS_STORAGE, I = (G & _util.RenderingIntentFlag.PRINT && D instanceof _annotation_storage.PrintAnnotationStorage ? D : this.annotationStorage).serializable;
- break;
- default:
- (0, _util.warn)(`getRenderingIntent - invalid annotationMode: ${N}`);
- }
- return X && (G += _util.RenderingIntentFlag.OPLIST), {
- renderingIntent: G,
- cacheKey: `${G}_${I.hash}`,
- annotationStorageSerializable: I
- };
- }
- destroy() {
- var D;
- if (this.destroyCapability)
- return this.destroyCapability.promise;
- this.destroyed = !0, this.destroyCapability = new _util.PromiseCapability(), (D = a(this, P)) == null || D.reject(new Error("Worker was destroyed during onPassword callback"));
- const m = [];
- for (const X of a(this, w).values())
- m.push(X._destroy());
- a(this, w).clear(), a(this, C).clear(), this.hasOwnProperty("annotationStorage") && this.annotationStorage.resetModified();
- const N = this.messageHandler.sendWithPromise("Terminate", null);
- return m.push(N), Promise.all(m).then(() => {
- var X;
- this.commonObjs.clear(), this.fontLoader.clear(), a(this, S).clear(), this.filterFactory.destroy(), (X = this._networkStream) == null || X.cancelAllRequests(new _util.AbortException("Worker was terminated.")), this.messageHandler && (this.messageHandler.destroy(), this.messageHandler = null), this.destroyCapability.resolve();
- }, this.destroyCapability.reject), this.destroyCapability.promise;
- }
- setupMessageHandler() {
- const {
- messageHandler: m,
- loadingTask: N
- } = this;
- m.on("GetReader", (D, X) => {
- (0, _util.assert)(this._networkStream, "GetReader - no `IPDFStream` instance available."), this._fullReader = this._networkStream.getFullReader(), this._fullReader.onProgress = (G) => {
- this._lastProgress = {
- loaded: G.loaded,
- total: G.total
- };
- }, X.onPull = () => {
- this._fullReader.read().then(function({
- value: G,
- done: I
- }) {
- if (I) {
- X.close();
- return;
- }
- (0, _util.assert)(G instanceof ArrayBuffer, "GetReader - expected an ArrayBuffer."), X.enqueue(new Uint8Array(G), 1, [G]);
- }).catch((G) => {
- X.error(G);
- });
- }, X.onCancel = (G) => {
- this._fullReader.cancel(G), X.ready.catch((I) => {
- if (!this.destroyed)
- throw I;
- });
- };
- }), m.on("ReaderHeadersReady", (D) => {
- const X = new _util.PromiseCapability(), G = this._fullReader;
- return G.headersReady.then(() => {
- var I;
- (!G.isStreamingSupported || !G.isRangeSupported) && (this._lastProgress && ((I = N.onProgress) == null || I.call(N, this._lastProgress)), G.onProgress = (B) => {
- var ee;
- (ee = N.onProgress) == null || ee.call(N, {
- loaded: B.loaded,
- total: B.total
- });
- }), X.resolve({
- isStreamingSupported: G.isStreamingSupported,
- isRangeSupported: G.isRangeSupported,
- contentLength: G.contentLength
- });
- }, X.reject), X.promise;
- }), m.on("GetRangeReader", (D, X) => {
- (0, _util.assert)(this._networkStream, "GetRangeReader - no `IPDFStream` instance available.");
- const G = this._networkStream.getRangeReader(D.begin, D.end);
- if (!G) {
- X.close();
- return;
- }
- X.onPull = () => {
- G.read().then(function({
- value: I,
- done: B
- }) {
- if (B) {
- X.close();
- return;
- }
- (0, _util.assert)(I instanceof ArrayBuffer, "GetRangeReader - expected an ArrayBuffer."), X.enqueue(new Uint8Array(I), 1, [I]);
- }).catch((I) => {
- X.error(I);
- });
- }, X.onCancel = (I) => {
- G.cancel(I), X.ready.catch((B) => {
- if (!this.destroyed)
- throw B;
- });
- };
- }), m.on("GetDoc", ({
- pdfInfo: D
- }) => {
- this._numPages = D.numPages, this._htmlForXfa = D.htmlForXfa, delete D.htmlForXfa, N._capability.resolve(new PDFDocumentProxy(D, this));
- }), m.on("DocException", function(D) {
- let X;
- switch (D.name) {
- case "PasswordException":
- X = new _util.PasswordException(D.message, D.code);
- break;
- case "InvalidPDFException":
- X = new _util.InvalidPDFException(D.message);
- break;
- case "MissingPDFException":
- X = new _util.MissingPDFException(D.message);
- break;
- case "UnexpectedResponseException":
- X = new _util.UnexpectedResponseException(D.message, D.status);
- break;
- case "UnknownErrorException":
- X = new _util.UnknownErrorException(D.message, D.details);
- break;
- default:
- (0, _util.unreachable)("DocException - expected a valid Error.");
- }
- N._capability.reject(X);
- }), m.on("PasswordRequest", (D) => {
- if (oe(this, P, new _util.PromiseCapability()), N.onPassword) {
- const X = (G) => {
- G instanceof Error ? a(this, P).reject(G) : a(this, P).resolve({
- password: G
- });
- };
- try {
- N.onPassword(X, D.code);
- } catch (G) {
- a(this, P).reject(G);
- }
- } else
- a(this, P).reject(new _util.PasswordException(D.message, D.code));
- return a(this, P).promise;
- }), m.on("DataLoaded", (D) => {
- var X;
- (X = N.onProgress) == null || X.call(N, {
- loaded: D.length,
- total: D.length
- }), this.downloadInfoCapability.resolve(D);
- }), m.on("StartRenderPage", (D) => {
- if (this.destroyed)
- return;
- a(this, w).get(D.pageIndex)._startRenderPage(D.transparency, D.cacheKey);
- }), m.on("commonobj", ([D, X, G]) => {
- var I;
- if (!this.destroyed && !this.commonObjs.has(D))
- switch (X) {
- case "Font":
- const B = this._params;
- if ("error" in G) {
- const q = G.error;
- (0, _util.warn)(`Error during font loading: ${q}`), this.commonObjs.resolve(D, q);
- break;
- }
- const ee = B.pdfBug && ((I = globalThis.FontInspector) != null && I.enabled) ? (q, le) => globalThis.FontInspector.fontAdded(q, le) : null, Y = new _font_loader.FontFaceObject(G, {
- isEvalSupported: B.isEvalSupported,
- disableFontFace: B.disableFontFace,
- ignoreErrors: B.ignoreErrors,
- inspectFont: ee
- });
- this.fontLoader.bind(Y).catch((q) => m.sendWithPromise("FontFallback", {
- id: D
- })).finally(() => {
- !B.fontExtraProperties && Y.data && (Y.data = null), this.commonObjs.resolve(D, Y);
- });
- break;
- case "FontPath":
- case "Image":
- case "Pattern":
- this.commonObjs.resolve(D, G);
- break;
- default:
- throw new Error(`Got unknown common object type ${X}`);
- }
- }), m.on("obj", ([D, X, G, I]) => {
- var ee;
- if (this.destroyed)
- return;
- const B = a(this, w).get(X);
- if (!B.objs.has(D))
- switch (G) {
- case "Image":
- if (B.objs.resolve(D, I), I) {
- let Y;
- if (I.bitmap) {
- const {
- width: q,
- height: le
- } = I;
- Y = q * le * 4;
- } else
- Y = ((ee = I.data) == null ? void 0 : ee.length) || 0;
- Y > _util.MAX_IMAGE_SIZE_TO_CACHE && (B._maybeCleanupAfterRender = !0);
- }
- break;
- case "Pattern":
- B.objs.resolve(D, I);
- break;
- default:
- throw new Error(`Got unknown object type ${G}`);
- }
- }), m.on("DocProgress", (D) => {
- var X;
- this.destroyed || (X = N.onProgress) == null || X.call(N, {
- loaded: D.loaded,
- total: D.total
- });
- }), m.on("FetchBuiltInCMap", (D) => this.destroyed ? Promise.reject(new Error("Worker was destroyed.")) : this.cMapReaderFactory ? this.cMapReaderFactory.fetch(D) : Promise.reject(new Error("CMapReaderFactory not initialized, see the `useWorkerFetch` parameter."))), m.on("FetchStandardFontData", (D) => this.destroyed ? Promise.reject(new Error("Worker was destroyed.")) : this.standardFontDataFactory ? this.standardFontDataFactory.fetch(D) : Promise.reject(new Error("StandardFontDataFactory not initialized, see the `useWorkerFetch` parameter.")));
- }
- getData() {
- return this.messageHandler.sendWithPromise("GetData", null);
- }
- saveDocument() {
- var D;
- this.annotationStorage.size <= 0 && (0, _util.warn)("saveDocument called while `annotationStorage` is empty, please use the getData-method instead.");
- const {
- map: m,
- transfers: N
- } = this.annotationStorage.serializable;
- return this.messageHandler.sendWithPromise("SaveDocument", {
- isPureXfa: !!this._htmlForXfa,
- numPages: this._numPages,
- annotationStorage: m,
- filename: ((D = this._fullReader) == null ? void 0 : D.filename) ?? null
- }, N).finally(() => {
- this.annotationStorage.resetModified();
- });
- }
- getPage(m) {
- if (!Number.isInteger(m) || m <= 0 || m > this._numPages)
- return Promise.reject(new Error("Invalid page request."));
- const N = m - 1, D = a(this, C).get(N);
- if (D)
- return D;
- const X = this.messageHandler.sendWithPromise("GetPage", {
- pageIndex: N
- }).then((G) => {
- if (this.destroyed)
- throw new Error("Transport destroyed");
- const I = new PDFPageProxy(N, G, this, this._params.pdfBug);
- return a(this, w).set(N, I), I;
- });
- return a(this, C).set(N, X), X;
- }
- getPageIndex(m) {
- return typeof m != "object" || m === null || !Number.isInteger(m.num) || m.num < 0 || !Number.isInteger(m.gen) || m.gen < 0 ? Promise.reject(new Error("Invalid pageIndex request.")) : this.messageHandler.sendWithPromise("GetPageIndex", {
- num: m.num,
- gen: m.gen
- });
- }
- getAnnotations(m, N) {
- return this.messageHandler.sendWithPromise("GetAnnotations", {
- pageIndex: m,
- intent: N
- });
- }
- getFieldObjects() {
- return K(this, b, Tt).call(this, "GetFieldObjects");
- }
- hasJSActions() {
- return K(this, b, Tt).call(this, "HasJSActions");
- }
- getCalculationOrderIds() {
- return this.messageHandler.sendWithPromise("GetCalculationOrderIds", null);
- }
- getDestinations() {
- return this.messageHandler.sendWithPromise("GetDestinations", null);
- }
- getDestination(m) {
- return typeof m != "string" ? Promise.reject(new Error("Invalid destination request.")) : this.messageHandler.sendWithPromise("GetDestination", {
- id: m
- });
- }
- getPageLabels() {
- return this.messageHandler.sendWithPromise("GetPageLabels", null);
- }
- getPageLayout() {
- return this.messageHandler.sendWithPromise("GetPageLayout", null);
- }
- getPageMode() {
- return this.messageHandler.sendWithPromise("GetPageMode", null);
- }
- getViewerPreferences() {
- return this.messageHandler.sendWithPromise("GetViewerPreferences", null);
- }
- getOpenAction() {
- return this.messageHandler.sendWithPromise("GetOpenAction", null);
- }
- getAttachments() {
- return this.messageHandler.sendWithPromise("GetAttachments", null);
- }
- getDocJSActions() {
- return K(this, b, Tt).call(this, "GetDocJSActions");
- }
- getPageJSActions(m) {
- return this.messageHandler.sendWithPromise("GetPageJSActions", {
- pageIndex: m
- });
- }
- getStructTree(m) {
- return this.messageHandler.sendWithPromise("GetStructTree", {
- pageIndex: m
- });
- }
- getOutline() {
- return this.messageHandler.sendWithPromise("GetOutline", null);
- }
- getOptionalContentConfig() {
- return this.messageHandler.sendWithPromise("GetOptionalContentConfig", null).then((m) => new _optional_content_config.OptionalContentConfig(m));
- }
- getPermissions() {
- return this.messageHandler.sendWithPromise("GetPermissions", null);
- }
- getMetadata() {
- const m = "GetMetadata", N = a(this, S).get(m);
- if (N)
- return N;
- const D = this.messageHandler.sendWithPromise(m, null).then((X) => {
- var G, I;
- return {
- info: X[0],
- metadata: X[1] ? new _metadata.Metadata(X[1]) : null,
- contentDispositionFilename: ((G = this._fullReader) == null ? void 0 : G.filename) ?? null,
- contentLength: ((I = this._fullReader) == null ? void 0 : I.contentLength) ?? null
- };
- });
- return a(this, S).set(m, D), D;
- }
- getMarkInfo() {
- return this.messageHandler.sendWithPromise("GetMarkInfo", null);
- }
- async startCleanup(m = !1) {
- if (!this.destroyed) {
- await this.messageHandler.sendWithPromise("Cleanup", null);
- for (const N of a(this, w).values())
- if (!N.cleanup())
- throw new Error(`startCleanup: Page ${N.pageNumber} is currently rendering.`);
- this.commonObjs.clear(), m || this.fontLoader.clear(), a(this, S).clear(), this.filterFactory.destroy(!0);
- }
- }
- get loadingParams() {
- const {
- disableAutoFetch: m,
- enableXfa: N
- } = this._params;
- return (0, _util.shadow)(this, "loadingParams", {
- disableAutoFetch: m,
- enableXfa: N
- });
- }
- }
- S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakSet(), Tt = function(m, N = null) {
- const D = a(this, S).get(m);
- if (D)
- return D;
- const X = this.messageHandler.sendWithPromise(m, N);
- return a(this, S).set(m, X), X;
- };
- class PDFObjects {
- constructor() {
- W(this, x);
- W(this, F, /* @__PURE__ */ Object.create(null));
- }
- get(m, N = null) {
- if (N) {
- const X = K(this, x, Bt).call(this, m);
- return X.capability.promise.then(() => N(X.data)), null;
- }
- const D = a(this, F)[m];
- if (!(D != null && D.capability.settled))
- throw new Error(`Requesting object that isn't resolved yet ${m}.`);
- return D.data;
- }
- has(m) {
- const N = a(this, F)[m];
- return (N == null ? void 0 : N.capability.settled) || !1;
- }
- resolve(m, N = null) {
- const D = K(this, x, Bt).call(this, m);
- D.data = N, D.capability.resolve();
- }
- clear() {
- var m;
- for (const N in a(this, F)) {
- const {
- data: D
- } = a(this, F)[N];
- (m = D == null ? void 0 : D.bitmap) == null || m.close();
- }
- oe(this, F, /* @__PURE__ */ Object.create(null));
- }
- }
- F = new WeakMap(), x = new WeakSet(), Bt = function(m) {
- var N;
- return (N = a(this, F))[m] || (N[m] = {
- capability: new _util.PromiseCapability(),
- data: null
- });
- };
- class RenderTask {
- constructor(m) {
- W(this, p, null);
- oe(this, p, m), this.onContinue = null;
- }
- get promise() {
- return a(this, p).capability.promise;
- }
- cancel(m = 0) {
- a(this, p).cancel(null, m);
- }
- get separateAnnots() {
- const {
- separateAnnots: m
- } = a(this, p).operatorList;
- if (!m)
- return !1;
- const {
- annotationCanvasMap: N
- } = a(this, p);
- return m.form || m.canvas && (N == null ? void 0 : N.size) > 0;
- }
- }
- p = new WeakMap(), exports.RenderTask = RenderTask;
- const $ = class $ {
- constructor({
- callback: m,
- params: N,
- objs: D,
- commonObjs: X,
- annotationCanvasMap: G,
- operatorList: I,
- pageIndex: B,
- canvasFactory: ee,
- filterFactory: Y,
- useRequestAnimationFrame: q = !1,
- pdfBug: le = !1,
- pageColors: pe = null
- }) {
- this.callback = m, this.params = N, this.objs = D, this.commonObjs = X, this.annotationCanvasMap = G, this.operatorListIdx = null, this.operatorList = I, this._pageIndex = B, this.canvasFactory = ee, this.filterFactory = Y, this._pdfBug = le, this.pageColors = pe, this.running = !1, this.graphicsReadyCallback = null, this.graphicsReady = !1, this._useRequestAnimationFrame = q === !0 && typeof window < "u", this.cancelled = !1, this.capability = new _util.PromiseCapability(), this.task = new RenderTask(this), this._cancelBound = this.cancel.bind(this), this._continueBound = this._continue.bind(this), this._scheduleNextBound = this._scheduleNext.bind(this), this._nextBound = this._next.bind(this), this._canvas = N.canvasContext.canvas;
- }
- get completed() {
- return this.capability.promise.catch(function() {
- });
- }
- initializeGraphics({
- transparency: m = !1,
- optionalContentConfig: N
- }) {
- var B, ee;
- if (this.cancelled)
- return;
- if (this._canvas) {
- if (a($, E).has(this._canvas))
- throw new Error("Cannot use the same canvas during multiple render() operations. Use different canvas or ensure previous operations were cancelled or completed.");
- a($, E).add(this._canvas);
- }
- this._pdfBug && ((B = globalThis.StepperManager) != null && B.enabled) && (this.stepper = globalThis.StepperManager.create(this._pageIndex), this.stepper.init(this.operatorList), this.stepper.nextBreakPoint = this.stepper.getNextBreakPoint());
- const {
- canvasContext: D,
- viewport: X,
- transform: G,
- background: I
- } = this.params;
- this.gfx = new _canvas.CanvasGraphics(D, this.commonObjs, this.objs, this.canvasFactory, this.filterFactory, {
- optionalContentConfig: N
- }, this.annotationCanvasMap, this.pageColors), this.gfx.beginDrawing({
- transform: G,
- viewport: X,
- transparency: m,
- background: I
- }), this.operatorListIdx = 0, this.graphicsReady = !0, (ee = this.graphicsReadyCallback) == null || ee.call(this);
- }
- cancel(m = null, N = 0) {
- var D;
- this.running = !1, this.cancelled = !0, (D = this.gfx) == null || D.endDrawing(), a($, E).delete(this._canvas), this.callback(m || new _display_utils.RenderingCancelledException(`Rendering cancelled, page ${this._pageIndex + 1}`, N));
- }
- operatorListChanged() {
- var m;
- if (!this.graphicsReady) {
- this.graphicsReadyCallback || (this.graphicsReadyCallback = this._continueBound);
- return;
- }
- (m = this.stepper) == null || m.updateOperatorList(this.operatorList), !this.running && this._continue();
- }
- _continue() {
- this.running = !0, !this.cancelled && (this.task.onContinue ? this.task.onContinue(this._scheduleNextBound) : this._scheduleNext());
- }
- _scheduleNext() {
- this._useRequestAnimationFrame ? window.requestAnimationFrame(() => {
- this._nextBound().catch(this._cancelBound);
- }) : Promise.resolve().then(this._nextBound).catch(this._cancelBound);
- }
- async _next() {
- this.cancelled || (this.operatorListIdx = this.gfx.executeOperatorList(this.operatorList, this.operatorListIdx, this._continueBound, this.stepper), this.operatorListIdx === this.operatorList.argsArray.length && (this.running = !1, this.operatorList.lastChunk && (this.gfx.endDrawing(), a($, E).delete(this._canvas), this.callback())));
- }
- };
- E = new WeakMap(), W($, E, /* @__PURE__ */ new WeakSet());
- let InternalRenderTask = $;
- const version = "3.11.174";
- exports.version = version;
- const build = "ce8716743";
- exports.build = build;
- },
- /* 3 */
- /***/
- (t, e, i) => {
- var o, r, T, En, w;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.SerializableEmpty = e.PrintAnnotationStorage = e.AnnotationStorage = void 0;
- var n = i(1), s = i(4), l = i(8);
- const h = Object.freeze({
- map: null,
- hash: "",
- transfers: void 0
- });
- e.SerializableEmpty = h;
- class _ {
- constructor() {
- W(this, T);
- W(this, o, !1);
- W(this, r, /* @__PURE__ */ new Map());
- this.onSetModified = null, this.onResetModified = null, this.onAnnotationEditor = null;
- }
- getValue(P, b) {
- const k = a(this, r).get(P);
- return k === void 0 ? b : Object.assign(b, k);
- }
- getRawValue(P) {
- return a(this, r).get(P);
- }
- remove(P) {
- if (a(this, r).delete(P), a(this, r).size === 0 && this.resetModified(), typeof this.onAnnotationEditor == "function") {
- for (const b of a(this, r).values())
- if (b instanceof s.AnnotationEditor)
- return;
- this.onAnnotationEditor(null);
- }
- }
- setValue(P, b) {
- const k = a(this, r).get(P);
- let F = !1;
- if (k !== void 0)
- for (const [x, y] of Object.entries(b))
- k[x] !== y && (F = !0, k[x] = y);
- else
- F = !0, a(this, r).set(P, b);
- F && K(this, T, En).call(this), b instanceof s.AnnotationEditor && typeof this.onAnnotationEditor == "function" && this.onAnnotationEditor(b.constructor._type);
- }
- has(P) {
- return a(this, r).has(P);
- }
- getAll() {
- return a(this, r).size > 0 ? (0, n.objectFromMap)(a(this, r)) : null;
- }
- setAll(P) {
- for (const [b, k] of Object.entries(P))
- this.setValue(b, k);
- }
- get size() {
- return a(this, r).size;
- }
- resetModified() {
- a(this, o) && (oe(this, o, !1), typeof this.onResetModified == "function" && this.onResetModified());
- }
- get print() {
- return new c(this);
- }
- get serializable() {
- if (a(this, r).size === 0)
- return h;
- const P = /* @__PURE__ */ new Map(), b = new l.MurmurHash3_64(), k = [], F = /* @__PURE__ */ Object.create(null);
- let x = !1;
- for (const [y, p] of a(this, r)) {
- const E = p instanceof s.AnnotationEditor ? p.serialize(!1, F) : p;
- E && (P.set(y, E), b.update(`${y}:${JSON.stringify(E)}`), x || (x = !!E.bitmap));
- }
- if (x)
- for (const y of P.values())
- y.bitmap && k.push(y.bitmap);
- return P.size > 0 ? {
- map: P,
- hash: b.hexdigest(),
- transfers: k
- } : h;
- }
- }
- o = new WeakMap(), r = new WeakMap(), T = new WeakSet(), En = function() {
- a(this, o) || (oe(this, o, !0), typeof this.onSetModified == "function" && this.onSetModified());
- }, e.AnnotationStorage = _;
- class c extends _ {
- constructor(b) {
- super();
- W(this, w, void 0);
- const {
- map: k,
- hash: F,
- transfers: x
- } = b.serializable, y = structuredClone(k, x ? {
- transfer: x
- } : null);
- oe(this, w, {
- map: y,
- hash: F,
- transfers: x
- });
- }
- get print() {
- (0, n.unreachable)("Should not call PrintAnnotationStorage.print");
- }
- get serializable() {
- return a(this, w);
- }
- }
- w = new WeakMap(), e.PrintAnnotationStorage = c;
- },
- /* 4 */
- /***/
- (t, e, i) => {
- var c, o, r, T, S, w, C, P, b, k, F, x, y, p, E, Ht, M, Ut, N, jt, X, Gt, I, Sn, ee, wn, q, Tn, pe, Wt, be, Cn;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.AnnotationEditor = void 0;
- var n = i(5), s = i(1), l = i(6);
- const d = class d {
- constructor(f) {
- W(this, E);
- W(this, M);
- W(this, X);
- W(this, I);
- W(this, ee);
- W(this, q);
- W(this, pe);
- W(this, be);
- W(this, c, "");
- W(this, o, !1);
- W(this, r, null);
- W(this, T, null);
- W(this, S, null);
- W(this, w, !1);
- W(this, C, null);
- W(this, P, this.focusin.bind(this));
- W(this, b, this.focusout.bind(this));
- W(this, k, !1);
- W(this, F, !1);
- W(this, x, !1);
- nt(this, "_initialOptions", /* @__PURE__ */ Object.create(null));
- nt(this, "_uiManager", null);
- nt(this, "_focusEventsAllowed", !0);
- nt(this, "_l10nPromise", null);
- W(this, y, !1);
- W(this, p, d._zIndex++);
- this.constructor === d && (0, s.unreachable)("Cannot initialize AnnotationEditor."), this.parent = f.parent, this.id = f.id, this.width = this.height = null, this.pageIndex = f.parent.pageIndex, this.name = f.name, this.div = null, this._uiManager = f.uiManager, this.annotationElementId = null, this._willKeepAspectRatio = !1, this._initialOptions.isCentered = f.isCentered, this._structTreeParentId = null;
- const {
- rotation: v,
- rawDims: {
- pageWidth: A,
- pageHeight: O,
- pageX: H,
- pageY: z
- }
- } = this.parent.viewport;
- this.rotation = v, this.pageRotation = (360 + v - this._uiManager.viewParameters.rotation) % 360, this.pageDimensions = [A, O], this.pageTranslation = [H, z];
- const [ae, Q] = this.parentDimensions;
- this.x = f.x / ae, this.y = f.y / Q, this.isAttachedToDOM = !1, this.deleted = !1;
- }
- get editorType() {
- return Object.getPrototypeOf(this).constructor._type;
- }
- static get _defaultLineColor() {
- return (0, s.shadow)(this, "_defaultLineColor", this._colorManager.getHexCode("CanvasText"));
- }
- static deleteAnnotationElement(f) {
- const v = new _({
- id: f.parent.getNextId(),
- parent: f.parent,
- uiManager: f._uiManager
- });
- v.annotationElementId = f.annotationElementId, v.deleted = !0, v._uiManager.addToAnnotationStorage(v);
- }
- static initialize(f, v = null) {
- if (d._l10nPromise || (d._l10nPromise = new Map(["editor_alt_text_button_label", "editor_alt_text_edit_button_label", "editor_alt_text_decorative_tooltip"].map((O) => [O, f.get(O)]))), v != null && v.strings)
- for (const O of v.strings)
- d._l10nPromise.set(O, f.get(O));
- if (d._borderLineWidth !== -1)
- return;
- const A = getComputedStyle(document.documentElement);
- d._borderLineWidth = parseFloat(A.getPropertyValue("--outline-width")) || 0;
- }
- static updateDefaultParams(f, v) {
- }
- static get defaultPropertiesToUpdate() {
- return [];
- }
- static isHandlingMimeForPasting(f) {
- return !1;
- }
- static paste(f, v) {
- (0, s.unreachable)("Not implemented");
- }
- get propertiesToUpdate() {
- return [];
- }
- get _isDraggable() {
- return a(this, y);
- }
- set _isDraggable(f) {
- var v;
- oe(this, y, f), (v = this.div) == null || v.classList.toggle("draggable", f);
- }
- center() {
- const [f, v] = this.pageDimensions;
- switch (this.parentRotation) {
- case 90:
- this.x -= this.height * v / (f * 2), this.y += this.width * f / (v * 2);
- break;
- case 180:
- this.x += this.width / 2, this.y += this.height / 2;
- break;
- case 270:
- this.x += this.height * v / (f * 2), this.y -= this.width * f / (v * 2);
- break;
- default:
- this.x -= this.width / 2, this.y -= this.height / 2;
- break;
- }
- this.fixAndSetPosition();
- }
- addCommands(f) {
- this._uiManager.addCommands(f);
- }
- get currentLayer() {
- return this._uiManager.currentLayer;
- }
- setInBackground() {
- this.div.style.zIndex = 0;
- }
- setInForeground() {
- this.div.style.zIndex = a(this, p);
- }
- setParent(f) {
- f !== null && (this.pageIndex = f.pageIndex, this.pageDimensions = f.pageDimensions), this.parent = f;
- }
- focusin(f) {
- this._focusEventsAllowed && (a(this, k) ? oe(this, k, !1) : this.parent.setSelected(this));
- }
- focusout(f) {
- var A;
- if (!this._focusEventsAllowed || !this.isAttachedToDOM)
- return;
- const v = f.relatedTarget;
- v != null && v.closest(`#${this.id}`) || (f.preventDefault(), (A = this.parent) != null && A.isMultipleSelection || this.commitOrRemove());
- }
- commitOrRemove() {
- this.isEmpty() ? this.remove() : this.commit();
- }
- commit() {
- this.addToAnnotationStorage();
- }
- addToAnnotationStorage() {
- this._uiManager.addToAnnotationStorage(this);
- }
- setAt(f, v, A, O) {
- const [H, z] = this.parentDimensions;
- [A, O] = this.screenToPageTranslation(A, O), this.x = (f + A) / H, this.y = (v + O) / z, this.fixAndSetPosition();
- }
- translate(f, v) {
- K(this, E, Ht).call(this, this.parentDimensions, f, v);
- }
- translateInPage(f, v) {
- K(this, E, Ht).call(this, this.pageDimensions, f, v), this.div.scrollIntoView({
- block: "nearest"
- });
- }
- drag(f, v) {
- const [A, O] = this.parentDimensions;
- if (this.x += f / A, this.y += v / O, this.parent && (this.x < 0 || this.x > 1 || this.y < 0 || this.y > 1)) {
- const {
- x: ce,
- y: ue
- } = this.div.getBoundingClientRect();
- this.parent.findNewParent(this, ce, ue) && (this.x -= Math.floor(this.x), this.y -= Math.floor(this.y));
- }
- let {
- x: H,
- y: z
- } = this;
- const [ae, Q] = K(this, M, Ut).call(this);
- H += ae, z += Q, this.div.style.left = `${(100 * H).toFixed(2)}%`, this.div.style.top = `${(100 * z).toFixed(2)}%`, this.div.scrollIntoView({
- block: "nearest"
- });
- }
- fixAndSetPosition() {
- const [f, v] = this.pageDimensions;
- let {
- x: A,
- y: O,
- width: H,
- height: z
- } = this;
- switch (H *= f, z *= v, A *= f, O *= v, this.rotation) {
- case 0:
- A = Math.max(0, Math.min(f - H, A)), O = Math.max(0, Math.min(v - z, O));
- break;
- case 90:
- A = Math.max(0, Math.min(f - z, A)), O = Math.min(v, Math.max(H, O));
- break;
- case 180:
- A = Math.min(f, Math.max(H, A)), O = Math.min(v, Math.max(z, O));
- break;
- case 270:
- A = Math.min(f, Math.max(z, A)), O = Math.max(0, Math.min(v - H, O));
- break;
- }
- this.x = A /= f, this.y = O /= v;
- const [ae, Q] = K(this, M, Ut).call(this);
- A += ae, O += Q;
- const {
- style: ce
- } = this.div;
- ce.left = `${(100 * A).toFixed(2)}%`, ce.top = `${(100 * O).toFixed(2)}%`, this.moveInDOM();
- }
- screenToPageTranslation(f, v) {
- var A;
- return K(A = d, N, jt).call(A, f, v, this.parentRotation);
- }
- pageTranslationToScreen(f, v) {
- var A;
- return K(A = d, N, jt).call(A, f, v, 360 - this.parentRotation);
- }
- get parentScale() {
- return this._uiManager.viewParameters.realScale;
- }
- get parentRotation() {
- return (this._uiManager.viewParameters.rotation + this.pageRotation) % 360;
- }
- get parentDimensions() {
- const {
- parentScale: f,
- pageDimensions: [v, A]
- } = this, O = v * f, H = A * f;
- return s.FeatureTest.isCSSRoundSupported ? [Math.round(O), Math.round(H)] : [O, H];
- }
- setDims(f, v) {
- var H;
- const [A, O] = this.parentDimensions;
- this.div.style.width = `${(100 * f / A).toFixed(2)}%`, a(this, w) || (this.div.style.height = `${(100 * v / O).toFixed(2)}%`), (H = a(this, r)) == null || H.classList.toggle("small", f < d.SMALL_EDITOR_SIZE || v < d.SMALL_EDITOR_SIZE);
- }
- fixDims() {
- const {
- style: f
- } = this.div, {
- height: v,
- width: A
- } = f, O = A.endsWith("%"), H = !a(this, w) && v.endsWith("%");
- if (O && H)
- return;
- const [z, ae] = this.parentDimensions;
- O || (f.width = `${(100 * parseFloat(A) / z).toFixed(2)}%`), !a(this, w) && !H && (f.height = `${(100 * parseFloat(v) / ae).toFixed(2)}%`);
- }
- getInitialTranslation() {
- return [0, 0];
- }
- async addAltTextButton() {
- if (a(this, r))
- return;
- const f = oe(this, r, document.createElement("button"));
- f.className = "altText";
- const v = await d._l10nPromise.get("editor_alt_text_button_label");
- f.textContent = v, f.setAttribute("aria-label", v), f.tabIndex = "0", f.addEventListener("contextmenu", l.noContextMenu), f.addEventListener("pointerdown", (A) => A.stopPropagation()), f.addEventListener("click", (A) => {
- A.preventDefault(), this._uiManager.editAltText(this);
- }, {
- capture: !0
- }), f.addEventListener("keydown", (A) => {
- A.target === f && A.key === "Enter" && (A.preventDefault(), this._uiManager.editAltText(this));
- }), K(this, pe, Wt).call(this), this.div.append(f), d.SMALL_EDITOR_SIZE || (d.SMALL_EDITOR_SIZE = Math.min(128, Math.round(f.getBoundingClientRect().width * 1.4)));
- }
- getClientDimensions() {
- return this.div.getBoundingClientRect();
- }
- get altTextData() {
- return {
- altText: a(this, c),
- decorative: a(this, o)
- };
- }
- set altTextData({
- altText: f,
- decorative: v
- }) {
- a(this, c) === f && a(this, o) === v || (oe(this, c, f), oe(this, o, v), K(this, pe, Wt).call(this));
- }
- render() {
- this.div = document.createElement("div"), this.div.setAttribute("data-editor-rotation", (360 - this.rotation) % 360), this.div.className = this.name, this.div.setAttribute("id", this.id), this.div.setAttribute("tabIndex", 0), this.setInForeground(), this.div.addEventListener("focusin", a(this, P)), this.div.addEventListener("focusout", a(this, b));
- const [f, v] = this.parentDimensions;
- this.parentRotation % 180 !== 0 && (this.div.style.maxWidth = `${(100 * v / f).toFixed(2)}%`, this.div.style.maxHeight = `${(100 * f / v).toFixed(2)}%`);
- const [A, O] = this.getInitialTranslation();
- return this.translate(A, O), (0, n.bindEvents)(this, this.div, ["pointerdown"]), this.div;
- }
- pointerdown(f) {
- const {
- isMac: v
- } = s.FeatureTest.platform;
- if (f.button !== 0 || f.ctrlKey && v) {
- f.preventDefault();
- return;
- }
- oe(this, k, !0), K(this, be, Cn).call(this, f);
- }
- moveInDOM() {
- var f;
- (f = this.parent) == null || f.moveEditorInDOM(this);
- }
- _setParentAndPosition(f, v, A) {
- f.changeParent(this), this.x = v, this.y = A, this.fixAndSetPosition();
- }
- getRect(f, v) {
- const A = this.parentScale, [O, H] = this.pageDimensions, [z, ae] = this.pageTranslation, Q = f / A, ce = v / A, ue = this.x * O, me = this.y * H, fe = this.width * O, Pe = this.height * H;
- switch (this.rotation) {
- case 0:
- return [ue + Q + z, H - me - ce - Pe + ae, ue + Q + fe + z, H - me - ce + ae];
- case 90:
- return [ue + ce + z, H - me + Q + ae, ue + ce + Pe + z, H - me + Q + fe + ae];
- case 180:
- return [ue - Q - fe + z, H - me + ce + ae, ue - Q + z, H - me + ce + Pe + ae];
- case 270:
- return [ue - ce - Pe + z, H - me - Q - fe + ae, ue - ce + z, H - me - Q + ae];
- default:
- throw new Error("Invalid rotation");
- }
- }
- getRectInCurrentCoords(f, v) {
- const [A, O, H, z] = f, ae = H - A, Q = z - O;
- switch (this.rotation) {
- case 0:
- return [A, v - z, ae, Q];
- case 90:
- return [A, v - O, Q, ae];
- case 180:
- return [H, v - O, ae, Q];
- case 270:
- return [H, v - z, Q, ae];
- default:
- throw new Error("Invalid rotation");
- }
- }
- onceAdded() {
- }
- isEmpty() {
- return !1;
- }
- enableEditMode() {
- oe(this, x, !0);
- }
- disableEditMode() {
- oe(this, x, !1);
- }
- isInEditMode() {
- return a(this, x);
- }
- shouldGetKeyboardEvents() {
- return !1;
- }
- needsToBeRebuilt() {
- return this.div && !this.isAttachedToDOM;
- }
- rebuild() {
- var f, v;
- (f = this.div) == null || f.addEventListener("focusin", a(this, P)), (v = this.div) == null || v.addEventListener("focusout", a(this, b));
- }
- serialize(f = !1, v = null) {
- (0, s.unreachable)("An editor must be serializable");
- }
- static deserialize(f, v, A) {
- const O = new this.prototype.constructor({
- parent: v,
- id: v.getNextId(),
- uiManager: A
- });
- O.rotation = f.rotation;
- const [H, z] = O.pageDimensions, [ae, Q, ce, ue] = O.getRectInCurrentCoords(f.rect, z);
- return O.x = ae / H, O.y = Q / z, O.width = ce / H, O.height = ue / z, O;
- }
- remove() {
- var f;
- this.div.removeEventListener("focusin", a(this, P)), this.div.removeEventListener("focusout", a(this, b)), this.isEmpty() || this.commit(), this.parent ? this.parent.remove(this) : this._uiManager.removeEditor(this), (f = a(this, r)) == null || f.remove(), oe(this, r, null), oe(this, T, null);
- }
- get isResizable() {
- return !1;
- }
- makeResizable() {
- this.isResizable && (K(this, I, Sn).call(this), a(this, C).classList.remove("hidden"));
- }
- select() {
- var f;
- this.makeResizable(), (f = this.div) == null || f.classList.add("selectedEditor");
- }
- unselect() {
- var f, v, A;
- (f = a(this, C)) == null || f.classList.add("hidden"), (v = this.div) == null || v.classList.remove("selectedEditor"), (A = this.div) != null && A.contains(document.activeElement) && this._uiManager.currentLayer.div.focus();
- }
- updateParams(f, v) {
- }
- disableEditing() {
- a(this, r) && (a(this, r).hidden = !0);
- }
- enableEditing() {
- a(this, r) && (a(this, r).hidden = !1);
- }
- enterInEditMode() {
- }
- get contentDiv() {
- return this.div;
- }
- get isEditing() {
- return a(this, F);
- }
- set isEditing(f) {
- oe(this, F, f), this.parent && (f ? (this.parent.setSelected(this), this.parent.setActiveEditor(this)) : this.parent.setActiveEditor(null));
- }
- setAspectRatio(f, v) {
- oe(this, w, !0);
- const A = f / v, {
- style: O
- } = this.div;
- O.aspectRatio = A, O.height = "auto";
- }
- static get MIN_SIZE() {
- return 16;
- }
- };
- c = new WeakMap(), o = new WeakMap(), r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakMap(), F = new WeakMap(), x = new WeakMap(), y = new WeakMap(), p = new WeakMap(), E = new WeakSet(), Ht = function([f, v], A, O) {
- [A, O] = this.screenToPageTranslation(A, O), this.x += A / f, this.y += O / v, this.fixAndSetPosition();
- }, M = new WeakSet(), Ut = function() {
- const [f, v] = this.parentDimensions, {
- _borderLineWidth: A
- } = d, O = A / f, H = A / v;
- switch (this.rotation) {
- case 90:
- return [-O, H];
- case 180:
- return [O, H];
- case 270:
- return [O, -H];
- default:
- return [-O, -H];
- }
- }, N = new WeakSet(), jt = function(f, v, A) {
- switch (A) {
- case 90:
- return [v, -f];
- case 180:
- return [-f, -v];
- case 270:
- return [-v, f];
- default:
- return [f, v];
- }
- }, X = new WeakSet(), Gt = function(f) {
- switch (f) {
- case 90: {
- const [v, A] = this.pageDimensions;
- return [0, -v / A, A / v, 0];
- }
- case 180:
- return [-1, 0, 0, -1];
- case 270: {
- const [v, A] = this.pageDimensions;
- return [0, v / A, -A / v, 0];
- }
- default:
- return [1, 0, 0, 1];
- }
- }, I = new WeakSet(), Sn = function() {
- if (a(this, C))
- return;
- oe(this, C, document.createElement("div")), a(this, C).classList.add("resizers");
- const f = ["topLeft", "topRight", "bottomRight", "bottomLeft"];
- this._willKeepAspectRatio || f.push("topMiddle", "middleRight", "bottomMiddle", "middleLeft");
- for (const v of f) {
- const A = document.createElement("div");
- a(this, C).append(A), A.classList.add("resizer", v), A.addEventListener("pointerdown", K(this, ee, wn).bind(this, v)), A.addEventListener("contextmenu", l.noContextMenu);
- }
- this.div.prepend(a(this, C));
- }, ee = new WeakSet(), wn = function(f, v) {
- v.preventDefault();
- const {
- isMac: A
- } = s.FeatureTest.platform;
- if (v.button !== 0 || v.ctrlKey && A)
- return;
- const O = K(this, q, Tn).bind(this, f), H = this._isDraggable;
- this._isDraggable = !1;
- const z = {
- passive: !0,
- capture: !0
- };
- window.addEventListener("pointermove", O, z);
- const ae = this.x, Q = this.y, ce = this.width, ue = this.height, me = this.parent.div.style.cursor, fe = this.div.style.cursor;
- this.div.style.cursor = this.parent.div.style.cursor = window.getComputedStyle(v.target).cursor;
- const Pe = () => {
- this._isDraggable = H, window.removeEventListener("pointerup", Pe), window.removeEventListener("blur", Pe), window.removeEventListener("pointermove", O, z), this.parent.div.style.cursor = me, this.div.style.cursor = fe;
- const Fe = this.x, Ee = this.y, De = this.width, _e = this.height;
- Fe === ae && Ee === Q && De === ce && _e === ue || this.addCommands({
- cmd: () => {
- this.width = De, this.height = _e, this.x = Fe, this.y = Ee;
- const [ie, se] = this.parentDimensions;
- this.setDims(ie * De, se * _e), this.fixAndSetPosition();
- },
- undo: () => {
- this.width = ce, this.height = ue, this.x = ae, this.y = Q;
- const [ie, se] = this.parentDimensions;
- this.setDims(ie * ce, se * ue), this.fixAndSetPosition();
- },
- mustExec: !0
- });
- };
- window.addEventListener("pointerup", Pe), window.addEventListener("blur", Pe);
- }, q = new WeakSet(), Tn = function(f, v) {
- const [A, O] = this.parentDimensions, H = this.x, z = this.y, ae = this.width, Q = this.height, ce = d.MIN_SIZE / A, ue = d.MIN_SIZE / O, me = (ve) => Math.round(ve * 1e4) / 1e4, fe = K(this, X, Gt).call(this, this.rotation), Pe = (ve, Se) => [fe[0] * ve + fe[2] * Se, fe[1] * ve + fe[3] * Se], Fe = K(this, X, Gt).call(this, 360 - this.rotation), Ee = (ve, Se) => [Fe[0] * ve + Fe[2] * Se, Fe[1] * ve + Fe[3] * Se];
- let De, _e, ie = !1, se = !1;
- switch (f) {
- case "topLeft":
- ie = !0, De = (ve, Se) => [0, 0], _e = (ve, Se) => [ve, Se];
- break;
- case "topMiddle":
- De = (ve, Se) => [ve / 2, 0], _e = (ve, Se) => [ve / 2, Se];
- break;
- case "topRight":
- ie = !0, De = (ve, Se) => [ve, 0], _e = (ve, Se) => [0, Se];
- break;
- case "middleRight":
- se = !0, De = (ve, Se) => [ve, Se / 2], _e = (ve, Se) => [0, Se / 2];
- break;
- case "bottomRight":
- ie = !0, De = (ve, Se) => [ve, Se], _e = (ve, Se) => [0, 0];
- break;
- case "bottomMiddle":
- De = (ve, Se) => [ve / 2, Se], _e = (ve, Se) => [ve / 2, 0];
- break;
- case "bottomLeft":
- ie = !0, De = (ve, Se) => [0, Se], _e = (ve, Se) => [ve, 0];
- break;
- case "middleLeft":
- se = !0, De = (ve, Se) => [0, Se / 2], _e = (ve, Se) => [ve, Se / 2];
- break;
- }
- const ge = De(ae, Q), Ce = _e(ae, Q);
- let xe = Pe(...Ce);
- const Ue = me(H + xe[0]), We = me(z + xe[1]);
- let je = 1, ze = 1, [Xe, Ge] = this.screenToPageTranslation(v.movementX, v.movementY);
- if ([Xe, Ge] = Ee(Xe / A, Ge / O), ie) {
- const ve = Math.hypot(ae, Q);
- je = ze = Math.max(Math.min(Math.hypot(Ce[0] - ge[0] - Xe, Ce[1] - ge[1] - Ge) / ve, 1 / ae, 1 / Q), ce / ae, ue / Q);
- } else
- se ? je = Math.max(ce, Math.min(1, Math.abs(Ce[0] - ge[0] - Xe))) / ae : ze = Math.max(ue, Math.min(1, Math.abs(Ce[1] - ge[1] - Ge))) / Q;
- const Ye = me(ae * je), de = me(Q * ze);
- xe = Pe(..._e(Ye, de));
- const ne = Ue - xe[0], J = We - xe[1];
- this.width = Ye, this.height = de, this.x = ne, this.y = J, this.setDims(A * Ye, O * de), this.fixAndSetPosition();
- }, pe = new WeakSet(), Wt = async function() {
- var A;
- const f = a(this, r);
- if (!f)
- return;
- if (!a(this, c) && !a(this, o)) {
- f.classList.remove("done"), (A = a(this, T)) == null || A.remove();
- return;
- }
- d._l10nPromise.get("editor_alt_text_edit_button_label").then((O) => {
- f.setAttribute("aria-label", O);
- });
- let v = a(this, T);
- if (!v) {
- oe(this, T, v = document.createElement("span")), v.className = "tooltip", v.setAttribute("role", "tooltip");
- const O = v.id = `alt-text-tooltip-${this.id}`;
- f.setAttribute("aria-describedby", O);
- const H = 100;
- f.addEventListener("mouseenter", () => {
- oe(this, S, setTimeout(() => {
- oe(this, S, null), a(this, T).classList.add("show"), this._uiManager._eventBus.dispatch("reporttelemetry", {
- source: this,
- details: {
- type: "editing",
- subtype: this.editorType,
- data: {
- action: "alt_text_tooltip"
- }
- }
- });
- }, H));
- }), f.addEventListener("mouseleave", () => {
- var z;
- clearTimeout(a(this, S)), oe(this, S, null), (z = a(this, T)) == null || z.classList.remove("show");
- });
- }
- f.classList.add("done"), v.innerText = a(this, o) ? await d._l10nPromise.get("editor_alt_text_decorative_tooltip") : a(this, c), v.parentNode || f.append(v);
- }, be = new WeakSet(), Cn = function(f) {
- if (!this._isDraggable)
- return;
- const v = this._uiManager.isSelected(this);
- this._uiManager.setUpDragSession();
- let A, O;
- v && (A = {
- passive: !0,
- capture: !0
- }, O = (z) => {
- const [ae, Q] = this.screenToPageTranslation(z.movementX, z.movementY);
- this._uiManager.dragSelectedEditors(ae, Q);
- }, window.addEventListener("pointermove", O, A));
- const H = () => {
- if (window.removeEventListener("pointerup", H), window.removeEventListener("blur", H), v && window.removeEventListener("pointermove", O, A), oe(this, k, !1), !this._uiManager.endDragSession()) {
- const {
- isMac: z
- } = s.FeatureTest.platform;
- f.ctrlKey && !z || f.shiftKey || f.metaKey && z ? this.parent.toggleSelected(this) : this.parent.setSelected(this);
- }
- };
- window.addEventListener("pointerup", H), window.addEventListener("blur", H);
- }, W(d, N), nt(d, "_borderLineWidth", -1), nt(d, "_colorManager", new n.ColorManager()), nt(d, "_zIndex", 1), nt(d, "SMALL_EDITOR_SIZE", 0);
- let h = d;
- e.AnnotationEditor = h;
- class _ extends h {
- constructor(f) {
- super(f), this.annotationElementId = f.annotationElementId, this.deleted = !0;
- }
- serialize() {
- return {
- id: this.annotationElementId,
- deleted: !0,
- pageIndex: this.pageIndex
- };
- }
- }
- },
- /* 5 */
- /***/
- (t, e, i) => {
- var w, C, P, b, k, qt, y, p, E, $, M, Pn, D, X, G, I, B, ee, Y, q, le, pe, we, be, R, d, g, f, v, A, O, H, z, ae, Q, ce, ue, me, fe, Pe, Fe, Ee, De, _e, ie, se, ge, xn, xe, zt, We, Xt, ze, Ct, Ge, Vt, de, Yt, J, at, Se, mt, et, kn, Te, Rn, ke, Kt, Be, bt, Ae, Jt;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.KeyboardManager = e.CommandManager = e.ColorManager = e.AnnotationEditorUIManager = void 0, e.bindEvents = l, e.opacityToHex = h;
- var n = i(1), s = i(6);
- function l(U, u, L) {
- for (const j of L)
- u.addEventListener(j, U[j].bind(U));
- }
- function h(U) {
- return Math.round(Math.min(255, Math.max(1, 255 * U))).toString(16).padStart(2, "0");
- }
- class _ {
- constructor() {
- W(this, w, 0);
- }
- getId() {
- return `${n.AnnotationEditorPrefix}${_t(this, w)._++}`;
- }
- }
- w = new WeakMap();
- const x = class x {
- constructor() {
- W(this, k);
- W(this, C, (0, n.getUuid)());
- W(this, P, 0);
- W(this, b, null);
- }
- static get _isSVGFittingCanvas() {
- const u = 'data:image/svg+xml;charset=UTF-8,', j = new OffscreenCanvas(1, 3).getContext("2d"), V = new Image();
- V.src = u;
- const Z = V.decode().then(() => (j.drawImage(V, 0, 0, 1, 1, 0, 0, 1, 3), new Uint32Array(j.getImageData(0, 0, 1, 1).data.buffer)[0] === 0));
- return (0, n.shadow)(this, "_isSVGFittingCanvas", Z);
- }
- async getFromFile(u) {
- const {
- lastModified: L,
- name: j,
- size: V,
- type: Z
- } = u;
- return K(this, k, qt).call(this, `${L}_${j}_${V}_${Z}`, u);
- }
- async getFromUrl(u) {
- return K(this, k, qt).call(this, u, u);
- }
- async getFromId(u) {
- a(this, b) || oe(this, b, /* @__PURE__ */ new Map());
- const L = a(this, b).get(u);
- return L ? L.bitmap ? (L.refCounter += 1, L) : L.file ? this.getFromFile(L.file) : this.getFromUrl(L.url) : null;
- }
- getSvgUrl(u) {
- const L = a(this, b).get(u);
- return L != null && L.isSvg ? L.svgUrl : null;
- }
- deleteId(u) {
- a(this, b) || oe(this, b, /* @__PURE__ */ new Map());
- const L = a(this, b).get(u);
- L && (L.refCounter -= 1, L.refCounter === 0 && (L.bitmap = null));
- }
- isValidId(u) {
- return u.startsWith(`image_${a(this, C)}_`);
- }
- };
- C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakSet(), qt = async function(u, L) {
- a(this, b) || oe(this, b, /* @__PURE__ */ new Map());
- let j = a(this, b).get(u);
- if (j === null)
- return null;
- if (j != null && j.bitmap)
- return j.refCounter += 1, j;
- try {
- j || (j = {
- bitmap: null,
- id: `image_${a(this, C)}_${_t(this, P)._++}`,
- refCounter: 0,
- isSvg: !1
- });
- let V;
- if (typeof L == "string") {
- j.url = L;
- const Z = await fetch(L);
- if (!Z.ok)
- throw new Error(Z.statusText);
- V = await Z.blob();
- } else
- V = j.file = L;
- if (V.type === "image/svg+xml") {
- const Z = x._isSVGFittingCanvas, he = new FileReader(), ye = new Image(), Me = new Promise((Re, qe) => {
- ye.onload = () => {
- j.bitmap = ye, j.isSvg = !0, Re();
- }, he.onload = async () => {
- const Ie = j.svgUrl = he.result;
- ye.src = await Z ? `${Ie}#svgView(preserveAspectRatio(none))` : Ie;
- }, ye.onerror = he.onerror = qe;
- });
- he.readAsDataURL(V), await Me;
- } else
- j.bitmap = await createImageBitmap(V);
- j.refCounter = 1;
- } catch (V) {
- console.error(V), j = null;
- }
- return a(this, b).set(u, j), j && a(this, b).set(j.id, j), j;
- };
- let c = x;
- class o {
- constructor(u = 128) {
- W(this, y, []);
- W(this, p, !1);
- W(this, E, void 0);
- W(this, $, -1);
- oe(this, E, u);
- }
- add({
- cmd: u,
- undo: L,
- mustExec: j,
- type: V = NaN,
- overwriteIfSameType: Z = !1,
- keepUndo: he = !1
- }) {
- if (j && u(), a(this, p))
- return;
- const ye = {
- cmd: u,
- undo: L,
- type: V
- };
- if (a(this, $) === -1) {
- a(this, y).length > 0 && (a(this, y).length = 0), oe(this, $, 0), a(this, y).push(ye);
- return;
- }
- if (Z && a(this, y)[a(this, $)].type === V) {
- he && (ye.undo = a(this, y)[a(this, $)].undo), a(this, y)[a(this, $)] = ye;
- return;
- }
- const Me = a(this, $) + 1;
- Me === a(this, E) ? a(this, y).splice(0, 1) : (oe(this, $, Me), Me < a(this, y).length && a(this, y).splice(Me)), a(this, y).push(ye);
- }
- undo() {
- a(this, $) !== -1 && (oe(this, p, !0), a(this, y)[a(this, $)].undo(), oe(this, p, !1), oe(this, $, a(this, $) - 1));
- }
- redo() {
- a(this, $) < a(this, y).length - 1 && (oe(this, $, a(this, $) + 1), oe(this, p, !0), a(this, y)[a(this, $)].cmd(), oe(this, p, !1));
- }
- hasSomethingToUndo() {
- return a(this, $) !== -1;
- }
- hasSomethingToRedo() {
- return a(this, $) < a(this, y).length - 1;
- }
- destroy() {
- oe(this, y, null);
- }
- }
- y = new WeakMap(), p = new WeakMap(), E = new WeakMap(), $ = new WeakMap(), e.CommandManager = o;
- class r {
- constructor(u) {
- W(this, M);
- this.buffer = [], this.callbacks = /* @__PURE__ */ new Map(), this.allKeys = /* @__PURE__ */ new Set();
- const {
- isMac: L
- } = n.FeatureTest.platform;
- for (const [j, V, Z = {}] of u)
- for (const he of j) {
- const ye = he.startsWith("mac+");
- L && ye ? (this.callbacks.set(he.slice(4), {
- callback: V,
- options: Z
- }), this.allKeys.add(he.split("+").at(-1))) : !L && !ye && (this.callbacks.set(he, {
- callback: V,
- options: Z
- }), this.allKeys.add(he.split("+").at(-1)));
- }
- }
- exec(u, L) {
- if (!this.allKeys.has(L.key))
- return;
- const j = this.callbacks.get(K(this, M, Pn).call(this, L));
- if (!j)
- return;
- const {
- callback: V,
- options: {
- bubbles: Z = !1,
- args: he = [],
- checker: ye = null
- }
- } = j;
- ye && !ye(u, L) || (V.bind(u, ...he)(), Z || (L.stopPropagation(), L.preventDefault()));
- }
- }
- M = new WeakSet(), Pn = function(u) {
- u.altKey && this.buffer.push("alt"), u.ctrlKey && this.buffer.push("ctrl"), u.metaKey && this.buffer.push("meta"), u.shiftKey && this.buffer.push("shift"), this.buffer.push(u.key);
- const L = this.buffer.join("+");
- return this.buffer.length = 0, L;
- }, e.KeyboardManager = r;
- const N = class N {
- get _colors() {
- const u = /* @__PURE__ */ new Map([["CanvasText", null], ["Canvas", null]]);
- return (0, s.getColorValues)(u), (0, n.shadow)(this, "_colors", u);
- }
- convert(u) {
- const L = (0, s.getRGB)(u);
- if (!window.matchMedia("(forced-colors: active)").matches)
- return L;
- for (const [j, V] of this._colors)
- if (V.every((Z, he) => Z === L[he]))
- return N._colorsMapping.get(j);
- return L;
- }
- getHexCode(u) {
- const L = this._colors.get(u);
- return L ? n.Util.makeHexColor(...L) : u;
- }
- };
- nt(N, "_colorsMapping", /* @__PURE__ */ new Map([["CanvasText", [0, 0, 0]], ["Canvas", [255, 255, 255]]]));
- let T = N;
- e.ColorManager = T;
- const Oe = class Oe {
- constructor(u, L, j, V, Z, he) {
- W(this, ge);
- W(this, xe);
- W(this, We);
- W(this, ze);
- W(this, Ge);
- W(this, de);
- W(this, J);
- W(this, Se);
- W(this, et);
- W(this, Te);
- W(this, ke);
- W(this, Be);
- W(this, Ae);
- W(this, D, null);
- W(this, X, /* @__PURE__ */ new Map());
- W(this, G, /* @__PURE__ */ new Map());
- W(this, I, null);
- W(this, B, null);
- W(this, ee, new o());
- W(this, Y, 0);
- W(this, q, /* @__PURE__ */ new Set());
- W(this, le, null);
- W(this, pe, null);
- W(this, we, /* @__PURE__ */ new Set());
- W(this, be, null);
- W(this, R, new _());
- W(this, d, !1);
- W(this, g, !1);
- W(this, f, null);
- W(this, v, n.AnnotationEditorType.NONE);
- W(this, A, /* @__PURE__ */ new Set());
- W(this, O, null);
- W(this, H, this.blur.bind(this));
- W(this, z, this.focus.bind(this));
- W(this, ae, this.copy.bind(this));
- W(this, Q, this.cut.bind(this));
- W(this, ce, this.paste.bind(this));
- W(this, ue, this.keydown.bind(this));
- W(this, me, this.onEditingAction.bind(this));
- W(this, fe, this.onPageChanging.bind(this));
- W(this, Pe, this.onScaleChanging.bind(this));
- W(this, Fe, this.onRotationChanging.bind(this));
- W(this, Ee, {
- isEditing: !1,
- isEmpty: !0,
- hasSomethingToUndo: !1,
- hasSomethingToRedo: !1,
- hasSelectedEditor: !1
- });
- W(this, De, [0, 0]);
- W(this, _e, null);
- W(this, ie, null);
- W(this, se, null);
- oe(this, ie, u), oe(this, se, L), oe(this, I, j), this._eventBus = V, this._eventBus._on("editingaction", a(this, me)), this._eventBus._on("pagechanging", a(this, fe)), this._eventBus._on("scalechanging", a(this, Pe)), this._eventBus._on("rotationchanging", a(this, Fe)), oe(this, B, Z.annotationStorage), oe(this, be, Z.filterFactory), oe(this, O, he), this.viewParameters = {
- realScale: s.PixelsPerInch.PDF_TO_CSS_UNITS,
- rotation: 0
- };
- }
- static get _keyboardManager() {
- const u = Oe.prototype, L = (Z) => {
- const {
- activeElement: he
- } = document;
- return he && a(Z, ie).contains(he) && Z.hasSomethingToControl();
- }, j = this.TRANSLATE_SMALL, V = this.TRANSLATE_BIG;
- return (0, n.shadow)(this, "_keyboardManager", new r([[["ctrl+a", "mac+meta+a"], u.selectAll], [["ctrl+z", "mac+meta+z"], u.undo], [["ctrl+y", "ctrl+shift+z", "mac+meta+shift+z", "ctrl+shift+Z", "mac+meta+shift+Z"], u.redo], [["Backspace", "alt+Backspace", "ctrl+Backspace", "shift+Backspace", "mac+Backspace", "mac+alt+Backspace", "mac+ctrl+Backspace", "Delete", "ctrl+Delete", "shift+Delete", "mac+Delete"], u.delete], [["Escape", "mac+Escape"], u.unselectAll], [["ArrowLeft", "mac+ArrowLeft"], u.translateSelectedEditors, {
- args: [-j, 0],
- checker: L
- }], [["ctrl+ArrowLeft", "mac+shift+ArrowLeft"], u.translateSelectedEditors, {
- args: [-V, 0],
- checker: L
- }], [["ArrowRight", "mac+ArrowRight"], u.translateSelectedEditors, {
- args: [j, 0],
- checker: L
- }], [["ctrl+ArrowRight", "mac+shift+ArrowRight"], u.translateSelectedEditors, {
- args: [V, 0],
- checker: L
- }], [["ArrowUp", "mac+ArrowUp"], u.translateSelectedEditors, {
- args: [0, -j],
- checker: L
- }], [["ctrl+ArrowUp", "mac+shift+ArrowUp"], u.translateSelectedEditors, {
- args: [0, -V],
- checker: L
- }], [["ArrowDown", "mac+ArrowDown"], u.translateSelectedEditors, {
- args: [0, j],
- checker: L
- }], [["ctrl+ArrowDown", "mac+shift+ArrowDown"], u.translateSelectedEditors, {
- args: [0, V],
- checker: L
- }]]));
- }
- destroy() {
- K(this, ze, Ct).call(this), K(this, xe, zt).call(this), this._eventBus._off("editingaction", a(this, me)), this._eventBus._off("pagechanging", a(this, fe)), this._eventBus._off("scalechanging", a(this, Pe)), this._eventBus._off("rotationchanging", a(this, Fe));
- for (const u of a(this, G).values())
- u.destroy();
- a(this, G).clear(), a(this, X).clear(), a(this, we).clear(), oe(this, D, null), a(this, A).clear(), a(this, ee).destroy(), a(this, I).destroy();
- }
- get hcmFilter() {
- return (0, n.shadow)(this, "hcmFilter", a(this, O) ? a(this, be).addHCMFilter(a(this, O).foreground, a(this, O).background) : "none");
- }
- get direction() {
- return (0, n.shadow)(this, "direction", getComputedStyle(a(this, ie)).direction);
- }
- editAltText(u) {
- var L;
- (L = a(this, I)) == null || L.editAltText(this, u);
- }
- onPageChanging({
- pageNumber: u
- }) {
- oe(this, Y, u - 1);
- }
- focusMainContainer() {
- a(this, ie).focus();
- }
- findParent(u, L) {
- for (const j of a(this, G).values()) {
- const {
- x: V,
- y: Z,
- width: he,
- height: ye
- } = j.div.getBoundingClientRect();
- if (u >= V && u <= V + he && L >= Z && L <= Z + ye)
- return j;
- }
- return null;
- }
- disableUserSelect(u = !1) {
- a(this, se).classList.toggle("noUserSelect", u);
- }
- addShouldRescale(u) {
- a(this, we).add(u);
- }
- removeShouldRescale(u) {
- a(this, we).delete(u);
- }
- onScaleChanging({
- scale: u
- }) {
- this.commitOrRemove(), this.viewParameters.realScale = u * s.PixelsPerInch.PDF_TO_CSS_UNITS;
- for (const L of a(this, we))
- L.onScaleChanging();
- }
- onRotationChanging({
- pagesRotation: u
- }) {
- this.commitOrRemove(), this.viewParameters.rotation = u;
- }
- addToAnnotationStorage(u) {
- !u.isEmpty() && a(this, B) && !a(this, B).has(u.id) && a(this, B).setValue(u.id, u);
- }
- blur() {
- if (!this.hasSelection)
- return;
- const {
- activeElement: u
- } = document;
- for (const L of a(this, A))
- if (L.div.contains(u)) {
- oe(this, f, [L, u]), L._focusEventsAllowed = !1;
- break;
- }
- }
- focus() {
- if (!a(this, f))
- return;
- const [u, L] = a(this, f);
- oe(this, f, null), L.addEventListener("focusin", () => {
- u._focusEventsAllowed = !0;
- }, {
- once: !0
- }), L.focus();
- }
- addEditListeners() {
- K(this, We, Xt).call(this), K(this, Ge, Vt).call(this);
- }
- removeEditListeners() {
- K(this, ze, Ct).call(this), K(this, de, Yt).call(this);
- }
- copy(u) {
- var j;
- if (u.preventDefault(), (j = a(this, D)) == null || j.commitOrRemove(), !this.hasSelection)
- return;
- const L = [];
- for (const V of a(this, A)) {
- const Z = V.serialize(!0);
- Z && L.push(Z);
- }
- L.length !== 0 && u.clipboardData.setData("application/pdfjs", JSON.stringify(L));
- }
- cut(u) {
- this.copy(u), this.delete();
- }
- paste(u) {
- u.preventDefault();
- const {
- clipboardData: L
- } = u;
- for (const Z of L.items)
- for (const he of a(this, pe))
- if (he.isHandlingMimeForPasting(Z.type)) {
- he.paste(Z, this.currentLayer);
- return;
- }
- let j = L.getData("application/pdfjs");
- if (!j)
- return;
- try {
- j = JSON.parse(j);
- } catch (Z) {
- (0, n.warn)(`paste: "${Z.message}".`);
- return;
- }
- if (!Array.isArray(j))
- return;
- this.unselectAll();
- const V = this.currentLayer;
- try {
- const Z = [];
- for (const Me of j) {
- const Re = V.deserialize(Me);
- if (!Re)
- return;
- Z.push(Re);
- }
- const he = () => {
- for (const Me of Z)
- K(this, ke, Kt).call(this, Me);
- K(this, Ae, Jt).call(this, Z);
- }, ye = () => {
- for (const Me of Z)
- Me.remove();
- };
- this.addCommands({
- cmd: he,
- undo: ye,
- mustExec: !0
- });
- } catch (Z) {
- (0, n.warn)(`paste: "${Z.message}".`);
- }
- }
- keydown(u) {
- var L;
- (L = this.getActive()) != null && L.shouldGetKeyboardEvents() || Oe._keyboardManager.exec(this, u);
- }
- onEditingAction(u) {
- ["undo", "redo", "delete", "selectAll"].includes(u.name) && this[u.name]();
- }
- setEditingState(u) {
- u ? (K(this, ge, xn).call(this), K(this, We, Xt).call(this), K(this, Ge, Vt).call(this), K(this, J, at).call(this, {
- isEditing: a(this, v) !== n.AnnotationEditorType.NONE,
- isEmpty: K(this, Be, bt).call(this),
- hasSomethingToUndo: a(this, ee).hasSomethingToUndo(),
- hasSomethingToRedo: a(this, ee).hasSomethingToRedo(),
- hasSelectedEditor: !1
- })) : (K(this, xe, zt).call(this), K(this, ze, Ct).call(this), K(this, de, Yt).call(this), K(this, J, at).call(this, {
- isEditing: !1
- }), this.disableUserSelect(!1));
- }
- registerEditorTypes(u) {
- if (!a(this, pe)) {
- oe(this, pe, u);
- for (const L of a(this, pe))
- K(this, Se, mt).call(this, L.defaultPropertiesToUpdate);
- }
- }
- getId() {
- return a(this, R).getId();
- }
- get currentLayer() {
- return a(this, G).get(a(this, Y));
- }
- getLayer(u) {
- return a(this, G).get(u);
- }
- get currentPageIndex() {
- return a(this, Y);
- }
- addLayer(u) {
- a(this, G).set(u.pageIndex, u), a(this, d) ? u.enable() : u.disable();
- }
- removeLayer(u) {
- a(this, G).delete(u.pageIndex);
- }
- updateMode(u, L = null) {
- if (a(this, v) !== u) {
- if (oe(this, v, u), u === n.AnnotationEditorType.NONE) {
- this.setEditingState(!1), K(this, Te, Rn).call(this);
- return;
- }
- this.setEditingState(!0), K(this, et, kn).call(this), this.unselectAll();
- for (const j of a(this, G).values())
- j.updateMode(u);
- if (L) {
- for (const j of a(this, X).values())
- if (j.annotationElementId === L) {
- this.setSelected(j), j.enterInEditMode();
- break;
- }
- }
- }
- }
- updateToolbar(u) {
- u !== a(this, v) && this._eventBus.dispatch("switchannotationeditormode", {
- source: this,
- mode: u
- });
- }
- updateParams(u, L) {
- if (a(this, pe)) {
- if (u === n.AnnotationEditorParamsType.CREATE) {
- this.currentLayer.addNewEditor(u);
- return;
- }
- for (const j of a(this, A))
- j.updateParams(u, L);
- for (const j of a(this, pe))
- j.updateDefaultParams(u, L);
- }
- }
- enableWaiting(u = !1) {
- if (a(this, g) !== u) {
- oe(this, g, u);
- for (const L of a(this, G).values())
- u ? L.disableClick() : L.enableClick(), L.div.classList.toggle("waiting", u);
- }
- }
- getEditors(u) {
- const L = [];
- for (const j of a(this, X).values())
- j.pageIndex === u && L.push(j);
- return L;
- }
- getEditor(u) {
- return a(this, X).get(u);
- }
- addEditor(u) {
- a(this, X).set(u.id, u);
- }
- removeEditor(u) {
- var L;
- a(this, X).delete(u.id), this.unselect(u), (!u.annotationElementId || !a(this, q).has(u.annotationElementId)) && ((L = a(this, B)) == null || L.remove(u.id));
- }
- addDeletedAnnotationElement(u) {
- a(this, q).add(u.annotationElementId), u.deleted = !0;
- }
- isDeletedAnnotationElement(u) {
- return a(this, q).has(u);
- }
- removeDeletedAnnotationElement(u) {
- a(this, q).delete(u.annotationElementId), u.deleted = !1;
- }
- setActiveEditor(u) {
- a(this, D) !== u && (oe(this, D, u), u && K(this, Se, mt).call(this, u.propertiesToUpdate));
- }
- toggleSelected(u) {
- if (a(this, A).has(u)) {
- a(this, A).delete(u), u.unselect(), K(this, J, at).call(this, {
- hasSelectedEditor: this.hasSelection
- });
- return;
- }
- a(this, A).add(u), u.select(), K(this, Se, mt).call(this, u.propertiesToUpdate), K(this, J, at).call(this, {
- hasSelectedEditor: !0
- });
- }
- setSelected(u) {
- for (const L of a(this, A))
- L !== u && L.unselect();
- a(this, A).clear(), a(this, A).add(u), u.select(), K(this, Se, mt).call(this, u.propertiesToUpdate), K(this, J, at).call(this, {
- hasSelectedEditor: !0
- });
- }
- isSelected(u) {
- return a(this, A).has(u);
- }
- unselect(u) {
- u.unselect(), a(this, A).delete(u), K(this, J, at).call(this, {
- hasSelectedEditor: this.hasSelection
- });
- }
- get hasSelection() {
- return a(this, A).size !== 0;
- }
- undo() {
- a(this, ee).undo(), K(this, J, at).call(this, {
- hasSomethingToUndo: a(this, ee).hasSomethingToUndo(),
- hasSomethingToRedo: !0,
- isEmpty: K(this, Be, bt).call(this)
- });
- }
- redo() {
- a(this, ee).redo(), K(this, J, at).call(this, {
- hasSomethingToUndo: !0,
- hasSomethingToRedo: a(this, ee).hasSomethingToRedo(),
- isEmpty: K(this, Be, bt).call(this)
- });
- }
- addCommands(u) {
- a(this, ee).add(u), K(this, J, at).call(this, {
- hasSomethingToUndo: !0,
- hasSomethingToRedo: !1,
- isEmpty: K(this, Be, bt).call(this)
- });
- }
- delete() {
- if (this.commitOrRemove(), !this.hasSelection)
- return;
- const u = [...a(this, A)], L = () => {
- for (const V of u)
- V.remove();
- }, j = () => {
- for (const V of u)
- K(this, ke, Kt).call(this, V);
- };
- this.addCommands({
- cmd: L,
- undo: j,
- mustExec: !0
- });
- }
- commitOrRemove() {
- var u;
- (u = a(this, D)) == null || u.commitOrRemove();
- }
- hasSomethingToControl() {
- return a(this, D) || this.hasSelection;
- }
- selectAll() {
- for (const u of a(this, A))
- u.commit();
- K(this, Ae, Jt).call(this, a(this, X).values());
- }
- unselectAll() {
- if (a(this, D)) {
- a(this, D).commitOrRemove();
- return;
- }
- if (this.hasSelection) {
- for (const u of a(this, A))
- u.unselect();
- a(this, A).clear(), K(this, J, at).call(this, {
- hasSelectedEditor: !1
- });
- }
- }
- translateSelectedEditors(u, L, j = !1) {
- if (j || this.commitOrRemove(), !this.hasSelection)
- return;
- a(this, De)[0] += u, a(this, De)[1] += L;
- const [V, Z] = a(this, De), he = [...a(this, A)], ye = 1e3;
- a(this, _e) && clearTimeout(a(this, _e)), oe(this, _e, setTimeout(() => {
- oe(this, _e, null), a(this, De)[0] = a(this, De)[1] = 0, this.addCommands({
- cmd: () => {
- for (const Me of he)
- a(this, X).has(Me.id) && Me.translateInPage(V, Z);
- },
- undo: () => {
- for (const Me of he)
- a(this, X).has(Me.id) && Me.translateInPage(-V, -Z);
- },
- mustExec: !1
- });
- }, ye));
- for (const Me of he)
- Me.translateInPage(u, L);
- }
- setUpDragSession() {
- if (this.hasSelection) {
- this.disableUserSelect(!0), oe(this, le, /* @__PURE__ */ new Map());
- for (const u of a(this, A))
- a(this, le).set(u, {
- savedX: u.x,
- savedY: u.y,
- savedPageIndex: u.pageIndex,
- newX: 0,
- newY: 0,
- newPageIndex: -1
- });
- }
- }
- endDragSession() {
- if (!a(this, le))
- return !1;
- this.disableUserSelect(!1);
- const u = a(this, le);
- oe(this, le, null);
- let L = !1;
- for (const [{
- x: V,
- y: Z,
- pageIndex: he
- }, ye] of u)
- ye.newX = V, ye.newY = Z, ye.newPageIndex = he, L || (L = V !== ye.savedX || Z !== ye.savedY || he !== ye.savedPageIndex);
- if (!L)
- return !1;
- const j = (V, Z, he, ye) => {
- if (a(this, X).has(V.id)) {
- const Me = a(this, G).get(ye);
- Me ? V._setParentAndPosition(Me, Z, he) : (V.pageIndex = ye, V.x = Z, V.y = he);
- }
- };
- return this.addCommands({
- cmd: () => {
- for (const [V, {
- newX: Z,
- newY: he,
- newPageIndex: ye
- }] of u)
- j(V, Z, he, ye);
- },
- undo: () => {
- for (const [V, {
- savedX: Z,
- savedY: he,
- savedPageIndex: ye
- }] of u)
- j(V, Z, he, ye);
- },
- mustExec: !0
- }), !0;
- }
- dragSelectedEditors(u, L) {
- if (a(this, le))
- for (const j of a(this, le).keys())
- j.drag(u, L);
- }
- rebuild(u) {
- if (u.parent === null) {
- const L = this.getLayer(u.pageIndex);
- L ? (L.changeParent(u), L.addOrRebuild(u)) : (this.addEditor(u), this.addToAnnotationStorage(u), u.rebuild());
- } else
- u.parent.addOrRebuild(u);
- }
- isActive(u) {
- return a(this, D) === u;
- }
- getActive() {
- return a(this, D);
- }
- getMode() {
- return a(this, v);
- }
- get imageManager() {
- return (0, n.shadow)(this, "imageManager", new c());
- }
- };
- D = new WeakMap(), X = new WeakMap(), G = new WeakMap(), I = new WeakMap(), B = new WeakMap(), ee = new WeakMap(), Y = new WeakMap(), q = new WeakMap(), le = new WeakMap(), pe = new WeakMap(), we = new WeakMap(), be = new WeakMap(), R = new WeakMap(), d = new WeakMap(), g = new WeakMap(), f = new WeakMap(), v = new WeakMap(), A = new WeakMap(), O = new WeakMap(), H = new WeakMap(), z = new WeakMap(), ae = new WeakMap(), Q = new WeakMap(), ce = new WeakMap(), ue = new WeakMap(), me = new WeakMap(), fe = new WeakMap(), Pe = new WeakMap(), Fe = new WeakMap(), Ee = new WeakMap(), De = new WeakMap(), _e = new WeakMap(), ie = new WeakMap(), se = new WeakMap(), ge = new WeakSet(), xn = function() {
- window.addEventListener("focus", a(this, z)), window.addEventListener("blur", a(this, H));
- }, xe = new WeakSet(), zt = function() {
- window.removeEventListener("focus", a(this, z)), window.removeEventListener("blur", a(this, H));
- }, We = new WeakSet(), Xt = function() {
- window.addEventListener("keydown", a(this, ue), {
- capture: !0
- });
- }, ze = new WeakSet(), Ct = function() {
- window.removeEventListener("keydown", a(this, ue), {
- capture: !0
- });
- }, Ge = new WeakSet(), Vt = function() {
- document.addEventListener("copy", a(this, ae)), document.addEventListener("cut", a(this, Q)), document.addEventListener("paste", a(this, ce));
- }, de = new WeakSet(), Yt = function() {
- document.removeEventListener("copy", a(this, ae)), document.removeEventListener("cut", a(this, Q)), document.removeEventListener("paste", a(this, ce));
- }, J = new WeakSet(), at = function(u) {
- Object.entries(u).some(([j, V]) => a(this, Ee)[j] !== V) && this._eventBus.dispatch("annotationeditorstateschanged", {
- source: this,
- details: Object.assign(a(this, Ee), u)
- });
- }, Se = new WeakSet(), mt = function(u) {
- this._eventBus.dispatch("annotationeditorparamschanged", {
- source: this,
- details: u
- });
- }, et = new WeakSet(), kn = function() {
- if (!a(this, d)) {
- oe(this, d, !0);
- for (const u of a(this, G).values())
- u.enable();
- }
- }, Te = new WeakSet(), Rn = function() {
- if (this.unselectAll(), a(this, d)) {
- oe(this, d, !1);
- for (const u of a(this, G).values())
- u.disable();
- }
- }, ke = new WeakSet(), Kt = function(u) {
- const L = a(this, G).get(u.pageIndex);
- L ? L.addOrRebuild(u) : this.addEditor(u);
- }, Be = new WeakSet(), bt = function() {
- if (a(this, X).size === 0)
- return !0;
- if (a(this, X).size === 1)
- for (const u of a(this, X).values())
- return u.isEmpty();
- return !1;
- }, Ae = new WeakSet(), Jt = function(u) {
- a(this, A).clear();
- for (const L of u)
- L.isEmpty() || (a(this, A).add(L), L.select());
- K(this, J, at).call(this, {
- hasSelectedEditor: !0
- });
- }, nt(Oe, "TRANSLATE_SMALL", 1), nt(Oe, "TRANSLATE_BIG", 10);
- let S = Oe;
- e.AnnotationEditorUIManager = S;
- },
- /* 6 */
- /***/
- (t, e, i) => {
- var Y, q, le, pe, we, be, R, d, g, f, v, A, dt, H, ft, ae, Qt, ce, Pt, me, xt, Pe, yt, Ee, vt;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.StatTimer = e.RenderingCancelledException = e.PixelsPerInch = e.PageViewport = e.PDFDateString = e.DOMStandardFontDataFactory = e.DOMSVGFactory = e.DOMFilterFactory = e.DOMCanvasFactory = e.DOMCMapReaderFactory = void 0, e.deprecated = $, e.getColorValues = X, e.getCurrentTransform = G, e.getCurrentTransformInverse = I, e.getFilenameFromUrl = k, e.getPdfFilenameFromUrl = F, e.getRGB = D, e.getXfaPageViewport = N, e.isDataScheme = P, e.isPdfFile = b, e.isValidFetchUrl = y, e.loadScript = E, e.noContextMenu = p, e.setLayerDimensions = B;
- var n = i(7), s = i(1);
- const l = "http://www.w3.org/2000/svg", ee = class ee {
- };
- nt(ee, "CSS", 96), nt(ee, "PDF", 72), nt(ee, "PDF_TO_CSS_UNITS", ee.CSS / ee.PDF);
- let h = ee;
- e.PixelsPerInch = h;
- class _ extends n.BaseFilterFactory {
- constructor({
- docId: se,
- ownerDocument: ge = globalThis.document
- } = {}) {
- super();
- W(this, A);
- W(this, H);
- W(this, ae);
- W(this, ce);
- W(this, me);
- W(this, Pe);
- W(this, Ee);
- W(this, Y, void 0);
- W(this, q, void 0);
- W(this, le, void 0);
- W(this, pe, void 0);
- W(this, we, void 0);
- W(this, be, void 0);
- W(this, R, void 0);
- W(this, d, void 0);
- W(this, g, void 0);
- W(this, f, void 0);
- W(this, v, 0);
- oe(this, le, se), oe(this, pe, ge);
- }
- addFilter(se) {
- if (!se)
- return "none";
- let ge = a(this, A, dt).get(se);
- if (ge)
- return ge;
- let Ce, xe, Ue, We;
- if (se.length === 1) {
- const Ge = se[0], Ye = new Array(256);
- for (let de = 0; de < 256; de++)
- Ye[de] = Ge[de] / 255;
- We = Ce = xe = Ue = Ye.join(",");
- } else {
- const [Ge, Ye, de] = se, ne = new Array(256), J = new Array(256), ve = new Array(256);
- for (let Se = 0; Se < 256; Se++)
- ne[Se] = Ge[Se] / 255, J[Se] = Ye[Se] / 255, ve[Se] = de[Se] / 255;
- Ce = ne.join(","), xe = J.join(","), Ue = ve.join(","), We = `${Ce}${xe}${Ue}`;
- }
- if (ge = a(this, A, dt).get(We), ge)
- return a(this, A, dt).set(se, ge), ge;
- const je = `g_${a(this, le)}_transfer_map_${_t(this, v)._++}`, ze = `url(#${je})`;
- a(this, A, dt).set(se, ze), a(this, A, dt).set(We, ze);
- const Xe = K(this, ce, Pt).call(this, je);
- return K(this, Pe, yt).call(this, Ce, xe, Ue, Xe), ze;
- }
- addHCMFilter(se, ge) {
- var Ye;
- const Ce = `${se}-${ge}`;
- if (a(this, be) === Ce)
- return a(this, R);
- if (oe(this, be, Ce), oe(this, R, "none"), (Ye = a(this, we)) == null || Ye.remove(), !se || !ge)
- return a(this, R);
- const xe = K(this, Ee, vt).call(this, se);
- se = s.Util.makeHexColor(...xe);
- const Ue = K(this, Ee, vt).call(this, ge);
- if (ge = s.Util.makeHexColor(...Ue), a(this, H, ft).style.color = "", se === "#000000" && ge === "#ffffff" || se === ge)
- return a(this, R);
- const We = new Array(256);
- for (let de = 0; de <= 255; de++) {
- const ne = de / 255;
- We[de] = ne <= 0.03928 ? ne / 12.92 : ((ne + 0.055) / 1.055) ** 2.4;
- }
- const je = We.join(","), ze = `g_${a(this, le)}_hcm_filter`, Xe = oe(this, d, K(this, ce, Pt).call(this, ze));
- K(this, Pe, yt).call(this, je, je, je, Xe), K(this, ae, Qt).call(this, Xe);
- const Ge = (de, ne) => {
- const J = xe[de] / 255, ve = Ue[de] / 255, Se = new Array(ne + 1);
- for (let tt = 0; tt <= ne; tt++)
- Se[tt] = J + tt / ne * (ve - J);
- return Se.join(",");
- };
- return K(this, Pe, yt).call(this, Ge(0, 5), Ge(1, 5), Ge(2, 5), Xe), oe(this, R, `url(#${ze})`), a(this, R);
- }
- addHighlightHCMFilter(se, ge, Ce, xe) {
- var ve;
- const Ue = `${se}-${ge}-${Ce}-${xe}`;
- if (a(this, g) === Ue)
- return a(this, f);
- if (oe(this, g, Ue), oe(this, f, "none"), (ve = a(this, d)) == null || ve.remove(), !se || !ge)
- return a(this, f);
- const [We, je] = [se, ge].map(K(this, Ee, vt).bind(this));
- let ze = Math.round(0.2126 * We[0] + 0.7152 * We[1] + 0.0722 * We[2]), Xe = Math.round(0.2126 * je[0] + 0.7152 * je[1] + 0.0722 * je[2]), [Ge, Ye] = [Ce, xe].map(K(this, Ee, vt).bind(this));
- Xe < ze && ([ze, Xe, Ge, Ye] = [Xe, ze, Ye, Ge]), a(this, H, ft).style.color = "";
- const de = (Se, tt, et) => {
- const te = new Array(256), Te = (Xe - ze) / et, Ne = Se / 255, ke = (tt - Se) / (255 * et);
- let $e = 0;
- for (let Be = 0; Be <= et; Be++) {
- const Qe = Math.round(ze + Be * Te), Ae = Ne + Be * ke;
- for (let Ke = $e; Ke <= Qe; Ke++)
- te[Ke] = Ae;
- $e = Qe + 1;
- }
- for (let Be = $e; Be < 256; Be++)
- te[Be] = te[$e - 1];
- return te.join(",");
- }, ne = `g_${a(this, le)}_hcm_highlight_filter`, J = oe(this, d, K(this, ce, Pt).call(this, ne));
- return K(this, ae, Qt).call(this, J), K(this, Pe, yt).call(this, de(Ge[0], Ye[0], 5), de(Ge[1], Ye[1], 5), de(Ge[2], Ye[2], 5), J), oe(this, f, `url(#${ne})`), a(this, f);
- }
- destroy(se = !1) {
- se && (a(this, R) || a(this, f)) || (a(this, q) && (a(this, q).parentNode.parentNode.remove(), oe(this, q, null)), a(this, Y) && (a(this, Y).clear(), oe(this, Y, null)), oe(this, v, 0));
- }
- }
- Y = new WeakMap(), q = new WeakMap(), le = new WeakMap(), pe = new WeakMap(), we = new WeakMap(), be = new WeakMap(), R = new WeakMap(), d = new WeakMap(), g = new WeakMap(), f = new WeakMap(), v = new WeakMap(), A = new WeakSet(), dt = function() {
- return a(this, Y) || oe(this, Y, /* @__PURE__ */ new Map());
- }, H = new WeakSet(), ft = function() {
- if (!a(this, q)) {
- const se = a(this, pe).createElement("div"), {
- style: ge
- } = se;
- ge.visibility = "hidden", ge.contain = "strict", ge.width = ge.height = 0, ge.position = "absolute", ge.top = ge.left = 0, ge.zIndex = -1;
- const Ce = a(this, pe).createElementNS(l, "svg");
- Ce.setAttribute("width", 0), Ce.setAttribute("height", 0), oe(this, q, a(this, pe).createElementNS(l, "defs")), se.append(Ce), Ce.append(a(this, q)), a(this, pe).body.append(se);
- }
- return a(this, q);
- }, ae = new WeakSet(), Qt = function(se) {
- const ge = a(this, pe).createElementNS(l, "feColorMatrix");
- ge.setAttribute("type", "matrix"), ge.setAttribute("values", "0.2126 0.7152 0.0722 0 0 0.2126 0.7152 0.0722 0 0 0.2126 0.7152 0.0722 0 0 0 0 0 1 0"), se.append(ge);
- }, ce = new WeakSet(), Pt = function(se) {
- const ge = a(this, pe).createElementNS(l, "filter");
- return ge.setAttribute("color-interpolation-filters", "sRGB"), ge.setAttribute("id", se), a(this, H, ft).append(ge), ge;
- }, me = new WeakSet(), xt = function(se, ge, Ce) {
- const xe = a(this, pe).createElementNS(l, ge);
- xe.setAttribute("type", "discrete"), xe.setAttribute("tableValues", Ce), se.append(xe);
- }, Pe = new WeakSet(), yt = function(se, ge, Ce, xe) {
- const Ue = a(this, pe).createElementNS(l, "feComponentTransfer");
- xe.append(Ue), K(this, me, xt).call(this, Ue, "feFuncR", se), K(this, me, xt).call(this, Ue, "feFuncG", ge), K(this, me, xt).call(this, Ue, "feFuncB", Ce);
- }, Ee = new WeakSet(), vt = function(se) {
- return a(this, H, ft).style.color = se, D(getComputedStyle(a(this, H, ft)).getPropertyValue("color"));
- }, e.DOMFilterFactory = _;
- class c extends n.BaseCanvasFactory {
- constructor({
- ownerDocument: ie = globalThis.document
- } = {}) {
- super(), this._document = ie;
- }
- _createCanvas(ie, se) {
- const ge = this._document.createElement("canvas");
- return ge.width = ie, ge.height = se, ge;
- }
- }
- e.DOMCanvasFactory = c;
- async function o(_e, ie = !1) {
- if (y(_e, document.baseURI)) {
- const se = await fetch(_e);
- if (!se.ok)
- throw new Error(se.statusText);
- return ie ? new Uint8Array(await se.arrayBuffer()) : (0, s.stringToBytes)(await se.text());
- }
- return new Promise((se, ge) => {
- const Ce = new XMLHttpRequest();
- Ce.open("GET", _e, !0), ie && (Ce.responseType = "arraybuffer"), Ce.onreadystatechange = () => {
- if (Ce.readyState === XMLHttpRequest.DONE) {
- if (Ce.status === 200 || Ce.status === 0) {
- let xe;
- if (ie && Ce.response ? xe = new Uint8Array(Ce.response) : !ie && Ce.responseText && (xe = (0, s.stringToBytes)(Ce.responseText)), xe) {
- se(xe);
- return;
- }
- }
- ge(new Error(Ce.statusText));
- }
- }, Ce.send(null);
- });
- }
- class r extends n.BaseCMapReaderFactory {
- _fetchData(ie, se) {
- return o(ie, this.isCompressed).then((ge) => ({
- cMapData: ge,
- compressionType: se
- }));
- }
- }
- e.DOMCMapReaderFactory = r;
- class T extends n.BaseStandardFontDataFactory {
- _fetchData(ie) {
- return o(ie, !0);
- }
- }
- e.DOMStandardFontDataFactory = T;
- class S extends n.BaseSVGFactory {
- _createSVG(ie) {
- return document.createElementNS(l, ie);
- }
- }
- e.DOMSVGFactory = S;
- class w {
- constructor({
- viewBox: ie,
- scale: se,
- rotation: ge,
- offsetX: Ce = 0,
- offsetY: xe = 0,
- dontFlip: Ue = !1
- }) {
- this.viewBox = ie, this.scale = se, this.rotation = ge, this.offsetX = Ce, this.offsetY = xe;
- const We = (ie[2] + ie[0]) / 2, je = (ie[3] + ie[1]) / 2;
- let ze, Xe, Ge, Ye;
- switch (ge %= 360, ge < 0 && (ge += 360), ge) {
- case 180:
- ze = -1, Xe = 0, Ge = 0, Ye = 1;
- break;
- case 90:
- ze = 0, Xe = 1, Ge = 1, Ye = 0;
- break;
- case 270:
- ze = 0, Xe = -1, Ge = -1, Ye = 0;
- break;
- case 0:
- ze = 1, Xe = 0, Ge = 0, Ye = -1;
- break;
- default:
- throw new Error("PageViewport: Invalid rotation, must be a multiple of 90 degrees.");
- }
- Ue && (Ge = -Ge, Ye = -Ye);
- let de, ne, J, ve;
- ze === 0 ? (de = Math.abs(je - ie[1]) * se + Ce, ne = Math.abs(We - ie[0]) * se + xe, J = (ie[3] - ie[1]) * se, ve = (ie[2] - ie[0]) * se) : (de = Math.abs(We - ie[0]) * se + Ce, ne = Math.abs(je - ie[1]) * se + xe, J = (ie[2] - ie[0]) * se, ve = (ie[3] - ie[1]) * se), this.transform = [ze * se, Xe * se, Ge * se, Ye * se, de - ze * se * We - Ge * se * je, ne - Xe * se * We - Ye * se * je], this.width = J, this.height = ve;
- }
- get rawDims() {
- const {
- viewBox: ie
- } = this;
- return (0, s.shadow)(this, "rawDims", {
- pageWidth: ie[2] - ie[0],
- pageHeight: ie[3] - ie[1],
- pageX: ie[0],
- pageY: ie[1]
- });
- }
- clone({
- scale: ie = this.scale,
- rotation: se = this.rotation,
- offsetX: ge = this.offsetX,
- offsetY: Ce = this.offsetY,
- dontFlip: xe = !1
- } = {}) {
- return new w({
- viewBox: this.viewBox.slice(),
- scale: ie,
- rotation: se,
- offsetX: ge,
- offsetY: Ce,
- dontFlip: xe
- });
- }
- convertToViewportPoint(ie, se) {
- return s.Util.applyTransform([ie, se], this.transform);
- }
- convertToViewportRectangle(ie) {
- const se = s.Util.applyTransform([ie[0], ie[1]], this.transform), ge = s.Util.applyTransform([ie[2], ie[3]], this.transform);
- return [se[0], se[1], ge[0], ge[1]];
- }
- convertToPdfPoint(ie, se) {
- return s.Util.applyInverseTransform([ie, se], this.transform);
- }
- }
- e.PageViewport = w;
- class C extends s.BaseException {
- constructor(ie, se = 0) {
- super(ie, "RenderingCancelledException"), this.extraDelay = se;
- }
- }
- e.RenderingCancelledException = C;
- function P(_e) {
- const ie = _e.length;
- let se = 0;
- for (; se < ie && _e[se].trim() === ""; )
- se++;
- return _e.substring(se, se + 5).toLowerCase() === "data:";
- }
- function b(_e) {
- return typeof _e == "string" && /\.pdf$/i.test(_e);
- }
- function k(_e, ie = !1) {
- return ie || ([_e] = _e.split(/[#?]/, 1)), _e.substring(_e.lastIndexOf("/") + 1);
- }
- function F(_e, ie = "document.pdf") {
- if (typeof _e != "string")
- return ie;
- if (P(_e))
- return (0, s.warn)('getPdfFilenameFromUrl: ignore "data:"-URL for performance reasons.'), ie;
- const se = /^(?:(?:[^:]+:)?\/\/[^/]+)?([^?#]*)(\?[^#]*)?(#.*)?$/, ge = /[^/?#=]+\.pdf\b(?!.*\.pdf\b)/i, Ce = se.exec(_e);
- let xe = ge.exec(Ce[1]) || ge.exec(Ce[2]) || ge.exec(Ce[3]);
- if (xe && (xe = xe[0], xe.includes("%")))
- try {
- xe = ge.exec(decodeURIComponent(xe))[0];
- } catch {
- }
- return xe || ie;
- }
- class x {
- constructor() {
- nt(this, "started", /* @__PURE__ */ Object.create(null));
- nt(this, "times", []);
- }
- time(ie) {
- ie in this.started && (0, s.warn)(`Timer is already running for ${ie}`), this.started[ie] = Date.now();
- }
- timeEnd(ie) {
- ie in this.started || (0, s.warn)(`Timer has not been started for ${ie}`), this.times.push({
- name: ie,
- start: this.started[ie],
- end: Date.now()
- }), delete this.started[ie];
- }
- toString() {
- const ie = [];
- let se = 0;
- for (const {
- name: ge
- } of this.times)
- se = Math.max(ge.length, se);
- for (const {
- name: ge,
- start: Ce,
- end: xe
- } of this.times)
- ie.push(`${ge.padEnd(se)} ${xe - Ce}ms
-`);
- return ie.join("");
- }
- }
- e.StatTimer = x;
- function y(_e, ie) {
- try {
- const {
- protocol: se
- } = ie ? new URL(_e, ie) : new URL(_e);
- return se === "http:" || se === "https:";
- } catch {
- return !1;
- }
- }
- function p(_e) {
- _e.preventDefault();
- }
- function E(_e, ie = !1) {
- return new Promise((se, ge) => {
- const Ce = document.createElement("script");
- Ce.src = _e, Ce.onload = function(xe) {
- ie && Ce.remove(), se(xe);
- }, Ce.onerror = function() {
- ge(new Error(`Cannot load script at: ${Ce.src}`));
- }, (document.head || document.documentElement).append(Ce);
- });
- }
- function $(_e) {
- console.log("Deprecated API usage: " + _e);
- }
- let M;
- class m {
- static toDateObject(ie) {
- if (!ie || typeof ie != "string")
- return null;
- M || (M = new RegExp("^D:(\\d{4})(\\d{2})?(\\d{2})?(\\d{2})?(\\d{2})?(\\d{2})?([Z|+|-])?(\\d{2})?'?(\\d{2})?'?"));
- const se = M.exec(ie);
- if (!se)
- return null;
- const ge = parseInt(se[1], 10);
- let Ce = parseInt(se[2], 10);
- Ce = Ce >= 1 && Ce <= 12 ? Ce - 1 : 0;
- let xe = parseInt(se[3], 10);
- xe = xe >= 1 && xe <= 31 ? xe : 1;
- let Ue = parseInt(se[4], 10);
- Ue = Ue >= 0 && Ue <= 23 ? Ue : 0;
- let We = parseInt(se[5], 10);
- We = We >= 0 && We <= 59 ? We : 0;
- let je = parseInt(se[6], 10);
- je = je >= 0 && je <= 59 ? je : 0;
- const ze = se[7] || "Z";
- let Xe = parseInt(se[8], 10);
- Xe = Xe >= 0 && Xe <= 23 ? Xe : 0;
- let Ge = parseInt(se[9], 10) || 0;
- return Ge = Ge >= 0 && Ge <= 59 ? Ge : 0, ze === "-" ? (Ue += Xe, We += Ge) : ze === "+" && (Ue -= Xe, We -= Ge), new Date(Date.UTC(ge, Ce, xe, Ue, We, je));
- }
- }
- e.PDFDateString = m;
- function N(_e, {
- scale: ie = 1,
- rotation: se = 0
- }) {
- const {
- width: ge,
- height: Ce
- } = _e.attributes.style, xe = [0, 0, parseInt(ge), parseInt(Ce)];
- return new w({
- viewBox: xe,
- scale: ie,
- rotation: se
- });
- }
- function D(_e) {
- if (_e.startsWith("#")) {
- const ie = parseInt(_e.slice(1), 16);
- return [(ie & 16711680) >> 16, (ie & 65280) >> 8, ie & 255];
- }
- return _e.startsWith("rgb(") ? _e.slice(4, -1).split(",").map((ie) => parseInt(ie)) : _e.startsWith("rgba(") ? _e.slice(5, -1).split(",").map((ie) => parseInt(ie)).slice(0, 3) : ((0, s.warn)(`Not a valid color format: "${_e}"`), [0, 0, 0]);
- }
- function X(_e) {
- const ie = document.createElement("span");
- ie.style.visibility = "hidden", document.body.append(ie);
- for (const se of _e.keys()) {
- ie.style.color = se;
- const ge = window.getComputedStyle(ie).color;
- _e.set(se, D(ge));
- }
- ie.remove();
- }
- function G(_e) {
- const {
- a: ie,
- b: se,
- c: ge,
- d: Ce,
- e: xe,
- f: Ue
- } = _e.getTransform();
- return [ie, se, ge, Ce, xe, Ue];
- }
- function I(_e) {
- const {
- a: ie,
- b: se,
- c: ge,
- d: Ce,
- e: xe,
- f: Ue
- } = _e.getTransform().invertSelf();
- return [ie, se, ge, Ce, xe, Ue];
- }
- function B(_e, ie, se = !1, ge = !0) {
- if (ie instanceof w) {
- const {
- pageWidth: Ce,
- pageHeight: xe
- } = ie.rawDims, {
- style: Ue
- } = _e, We = s.FeatureTest.isCSSRoundSupported, je = `var(--scale-factor) * ${Ce}px`, ze = `var(--scale-factor) * ${xe}px`, Xe = We ? `round(${je}, 1px)` : `calc(${je})`, Ge = We ? `round(${ze}, 1px)` : `calc(${ze})`;
- !se || ie.rotation % 180 === 0 ? (Ue.width = Xe, Ue.height = Ge) : (Ue.width = Ge, Ue.height = Xe);
- }
- ge && _e.setAttribute("data-main-rotation", ie.rotation);
- }
- },
- /* 7 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.BaseStandardFontDataFactory = e.BaseSVGFactory = e.BaseFilterFactory = e.BaseCanvasFactory = e.BaseCMapReaderFactory = void 0;
- var n = i(1);
- class s {
- constructor() {
- this.constructor === s && (0, n.unreachable)("Cannot initialize BaseFilterFactory.");
- }
- addFilter(r) {
- return "none";
- }
- addHCMFilter(r, T) {
- return "none";
- }
- addHighlightHCMFilter(r, T, S, w) {
- return "none";
- }
- destroy(r = !1) {
- }
- }
- e.BaseFilterFactory = s;
- class l {
- constructor() {
- this.constructor === l && (0, n.unreachable)("Cannot initialize BaseCanvasFactory.");
- }
- create(r, T) {
- if (r <= 0 || T <= 0)
- throw new Error("Invalid canvas size");
- const S = this._createCanvas(r, T);
- return {
- canvas: S,
- context: S.getContext("2d")
- };
- }
- reset(r, T, S) {
- if (!r.canvas)
- throw new Error("Canvas is not specified");
- if (T <= 0 || S <= 0)
- throw new Error("Invalid canvas size");
- r.canvas.width = T, r.canvas.height = S;
- }
- destroy(r) {
- if (!r.canvas)
- throw new Error("Canvas is not specified");
- r.canvas.width = 0, r.canvas.height = 0, r.canvas = null, r.context = null;
- }
- _createCanvas(r, T) {
- (0, n.unreachable)("Abstract method `_createCanvas` called.");
- }
- }
- e.BaseCanvasFactory = l;
- class h {
- constructor({
- baseUrl: r = null,
- isCompressed: T = !0
- }) {
- this.constructor === h && (0, n.unreachable)("Cannot initialize BaseCMapReaderFactory."), this.baseUrl = r, this.isCompressed = T;
- }
- async fetch({
- name: r
- }) {
- if (!this.baseUrl)
- throw new Error('The CMap "baseUrl" parameter must be specified, ensure that the "cMapUrl" and "cMapPacked" API parameters are provided.');
- if (!r)
- throw new Error("CMap name must be specified.");
- const T = this.baseUrl + r + (this.isCompressed ? ".bcmap" : ""), S = this.isCompressed ? n.CMapCompressionType.BINARY : n.CMapCompressionType.NONE;
- return this._fetchData(T, S).catch((w) => {
- throw new Error(`Unable to load ${this.isCompressed ? "binary " : ""}CMap at: ${T}`);
- });
- }
- _fetchData(r, T) {
- (0, n.unreachable)("Abstract method `_fetchData` called.");
- }
- }
- e.BaseCMapReaderFactory = h;
- class _ {
- constructor({
- baseUrl: r = null
- }) {
- this.constructor === _ && (0, n.unreachable)("Cannot initialize BaseStandardFontDataFactory."), this.baseUrl = r;
- }
- async fetch({
- filename: r
- }) {
- if (!this.baseUrl)
- throw new Error('The standard font "baseUrl" parameter must be specified, ensure that the "standardFontDataUrl" API parameter is provided.');
- if (!r)
- throw new Error("Font filename must be specified.");
- const T = `${this.baseUrl}${r}`;
- return this._fetchData(T).catch((S) => {
- throw new Error(`Unable to load font data at: ${T}`);
- });
- }
- _fetchData(r) {
- (0, n.unreachable)("Abstract method `_fetchData` called.");
- }
- }
- e.BaseStandardFontDataFactory = _;
- class c {
- constructor() {
- this.constructor === c && (0, n.unreachable)("Cannot initialize BaseSVGFactory.");
- }
- create(r, T, S = !1) {
- if (r <= 0 || T <= 0)
- throw new Error("Invalid SVG dimensions");
- const w = this._createSVG("svg:svg");
- return w.setAttribute("version", "1.1"), S || (w.setAttribute("width", `${r}px`), w.setAttribute("height", `${T}px`)), w.setAttribute("preserveAspectRatio", "none"), w.setAttribute("viewBox", `0 0 ${r} ${T}`), w;
- }
- createElement(r) {
- if (typeof r != "string")
- throw new Error("Invalid SVG element type");
- return this._createSVG(r);
- }
- _createSVG(r) {
- (0, n.unreachable)("Abstract method `_createSVG` called.");
- }
- }
- e.BaseSVGFactory = c;
- },
- /* 8 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.MurmurHash3_64 = void 0;
- var n = i(1);
- const s = 3285377520, l = 4294901760, h = 65535;
- class _ {
- constructor(o) {
- this.h1 = o ? o & 4294967295 : s, this.h2 = o ? o & 4294967295 : s;
- }
- update(o) {
- let r, T;
- if (typeof o == "string") {
- r = new Uint8Array(o.length * 2), T = 0;
- for (let $ = 0, M = o.length; $ < M; $++) {
- const m = o.charCodeAt($);
- m <= 255 ? r[T++] = m : (r[T++] = m >>> 8, r[T++] = m & 255);
- }
- } else if ((0, n.isArrayBuffer)(o))
- r = o.slice(), T = r.byteLength;
- else
- throw new Error("Wrong data format in MurmurHash3_64_update. Input must be a string or array.");
- const S = T >> 2, w = T - S * 4, C = new Uint32Array(r.buffer, 0, S);
- let P = 0, b = 0, k = this.h1, F = this.h2;
- const x = 3432918353, y = 461845907, p = x & h, E = y & h;
- for (let $ = 0; $ < S; $++)
- $ & 1 ? (P = C[$], P = P * x & l | P * p & h, P = P << 15 | P >>> 17, P = P * y & l | P * E & h, k ^= P, k = k << 13 | k >>> 19, k = k * 5 + 3864292196) : (b = C[$], b = b * x & l | b * p & h, b = b << 15 | b >>> 17, b = b * y & l | b * E & h, F ^= b, F = F << 13 | F >>> 19, F = F * 5 + 3864292196);
- switch (P = 0, w) {
- case 3:
- P ^= r[S * 4 + 2] << 16;
- case 2:
- P ^= r[S * 4 + 1] << 8;
- case 1:
- P ^= r[S * 4], P = P * x & l | P * p & h, P = P << 15 | P >>> 17, P = P * y & l | P * E & h, S & 1 ? k ^= P : F ^= P;
- }
- this.h1 = k, this.h2 = F;
- }
- hexdigest() {
- let o = this.h1, r = this.h2;
- return o ^= r >>> 1, o = o * 3981806797 & l | o * 36045 & h, r = r * 4283543511 & l | ((r << 16 | o >>> 16) * 2950163797 & l) >>> 16, o ^= r >>> 1, o = o * 444984403 & l | o * 60499 & h, r = r * 3301882366 & l | ((r << 16 | o >>> 16) * 3120437893 & l) >>> 16, o ^= r >>> 1, (o >>> 0).toString(16).padStart(8, "0") + (r >>> 0).toString(16).padStart(8, "0");
- }
- }
- e.MurmurHash3_64 = _;
- },
- /* 9 */
- /***/
- (t, e, i) => {
- var h;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.FontLoader = e.FontFaceObject = void 0;
- var n = i(1);
- class s {
- constructor({
- ownerDocument: c = globalThis.document,
- styleElement: o = null
- }) {
- W(this, h, /* @__PURE__ */ new Set());
- this._document = c, this.nativeFontFaces = /* @__PURE__ */ new Set(), this.styleElement = null, this.loadingRequests = [], this.loadTestFontId = 0;
- }
- addNativeFontFace(c) {
- this.nativeFontFaces.add(c), this._document.fonts.add(c);
- }
- removeNativeFontFace(c) {
- this.nativeFontFaces.delete(c), this._document.fonts.delete(c);
- }
- insertRule(c) {
- this.styleElement || (this.styleElement = this._document.createElement("style"), this._document.documentElement.getElementsByTagName("head")[0].append(this.styleElement));
- const o = this.styleElement.sheet;
- o.insertRule(c, o.cssRules.length);
- }
- clear() {
- for (const c of this.nativeFontFaces)
- this._document.fonts.delete(c);
- this.nativeFontFaces.clear(), a(this, h).clear(), this.styleElement && (this.styleElement.remove(), this.styleElement = null);
- }
- async loadSystemFont(c) {
- if (!(!c || a(this, h).has(c.loadedName))) {
- if ((0, n.assert)(!this.disableFontFace, "loadSystemFont shouldn't be called when `disableFontFace` is set."), this.isFontLoadingAPISupported) {
- const {
- loadedName: o,
- src: r,
- style: T
- } = c, S = new FontFace(o, r, T);
- this.addNativeFontFace(S);
- try {
- await S.load(), a(this, h).add(o);
- } catch {
- (0, n.warn)(`Cannot load system font: ${c.baseFontName}, installing it could help to improve PDF rendering.`), this.removeNativeFontFace(S);
- }
- return;
- }
- (0, n.unreachable)("Not implemented: loadSystemFont without the Font Loading API.");
- }
- }
- async bind(c) {
- if (c.attached || c.missingFile && !c.systemFontInfo)
- return;
- if (c.attached = !0, c.systemFontInfo) {
- await this.loadSystemFont(c.systemFontInfo);
- return;
- }
- if (this.isFontLoadingAPISupported) {
- const r = c.createNativeFontFace();
- if (r) {
- this.addNativeFontFace(r);
- try {
- await r.loaded;
- } catch (T) {
- throw (0, n.warn)(`Failed to load font '${r.family}': '${T}'.`), c.disableFontFace = !0, T;
- }
- }
- return;
- }
- const o = c.createFontFaceRule();
- if (o) {
- if (this.insertRule(o), this.isSyncFontLoadingSupported)
- return;
- await new Promise((r) => {
- const T = this._queueLoadingCallback(r);
- this._prepareFontLoadEvent(c, T);
- });
- }
- }
- get isFontLoadingAPISupported() {
- var o;
- const c = !!((o = this._document) != null && o.fonts);
- return (0, n.shadow)(this, "isFontLoadingAPISupported", c);
- }
- get isSyncFontLoadingSupported() {
- let c = !1;
- return (n.isNodeJS || typeof navigator < "u" && /Mozilla\/5.0.*?rv:\d+.*? Gecko/.test(navigator.userAgent)) && (c = !0), (0, n.shadow)(this, "isSyncFontLoadingSupported", c);
- }
- _queueLoadingCallback(c) {
- function o() {
- for ((0, n.assert)(!T.done, "completeRequest() cannot be called twice."), T.done = !0; r.length > 0 && r[0].done; ) {
- const S = r.shift();
- setTimeout(S.callback, 0);
- }
- }
- const {
- loadingRequests: r
- } = this, T = {
- done: !1,
- complete: o,
- callback: c
- };
- return r.push(T), T;
- }
- get _loadTestFont() {
- const c = atob("T1RUTwALAIAAAwAwQ0ZGIDHtZg4AAAOYAAAAgUZGVE1lkzZwAAAEHAAAABxHREVGABQAFQAABDgAAAAeT1MvMlYNYwkAAAEgAAAAYGNtYXABDQLUAAACNAAAAUJoZWFk/xVFDQAAALwAAAA2aGhlYQdkA+oAAAD0AAAAJGhtdHgD6AAAAAAEWAAAAAZtYXhwAAJQAAAAARgAAAAGbmFtZVjmdH4AAAGAAAAAsXBvc3T/hgAzAAADeAAAACAAAQAAAAEAALZRFsRfDzz1AAsD6AAAAADOBOTLAAAAAM4KHDwAAAAAA+gDIQAAAAgAAgAAAAAAAAABAAADIQAAAFoD6AAAAAAD6AABAAAAAAAAAAAAAAAAAAAAAQAAUAAAAgAAAAQD6AH0AAUAAAKKArwAAACMAooCvAAAAeAAMQECAAACAAYJAAAAAAAAAAAAAQAAAAAAAAAAAAAAAFBmRWQAwAAuAC4DIP84AFoDIQAAAAAAAQAAAAAAAAAAACAAIAABAAAADgCuAAEAAAAAAAAAAQAAAAEAAAAAAAEAAQAAAAEAAAAAAAIAAQAAAAEAAAAAAAMAAQAAAAEAAAAAAAQAAQAAAAEAAAAAAAUAAQAAAAEAAAAAAAYAAQAAAAMAAQQJAAAAAgABAAMAAQQJAAEAAgABAAMAAQQJAAIAAgABAAMAAQQJAAMAAgABAAMAAQQJAAQAAgABAAMAAQQJAAUAAgABAAMAAQQJAAYAAgABWABYAAAAAAAAAwAAAAMAAAAcAAEAAAAAADwAAwABAAAAHAAEACAAAAAEAAQAAQAAAC7//wAAAC7////TAAEAAAAAAAABBgAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAD/gwAyAAAAAQAAAAAAAAAAAAAAAAAAAAABAAQEAAEBAQJYAAEBASH4DwD4GwHEAvgcA/gXBIwMAYuL+nz5tQXkD5j3CBLnEQACAQEBIVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYAAABAQAADwACAQEEE/t3Dov6fAH6fAT+fPp8+nwHDosMCvm1Cvm1DAz6fBQAAAAAAAABAAAAAMmJbzEAAAAAzgTjFQAAAADOBOQpAAEAAAAAAAAADAAUAAQAAAABAAAAAgABAAAAAAAAAAAD6AAAAAAAAA==");
- return (0, n.shadow)(this, "_loadTestFont", c);
- }
- _prepareFontLoadEvent(c, o) {
- function r(D, X) {
- return D.charCodeAt(X) << 24 | D.charCodeAt(X + 1) << 16 | D.charCodeAt(X + 2) << 8 | D.charCodeAt(X + 3) & 255;
- }
- function T(D, X, G, I) {
- const B = D.substring(0, X), ee = D.substring(X + G);
- return B + I + ee;
- }
- let S, w;
- const C = this._document.createElement("canvas");
- C.width = 1, C.height = 1;
- const P = C.getContext("2d");
- let b = 0;
- function k(D, X) {
- if (++b > 30) {
- (0, n.warn)("Load test font never loaded."), X();
- return;
- }
- if (P.font = "30px " + D, P.fillText(".", 0, 20), P.getImageData(0, 0, 1, 1).data[3] > 0) {
- X();
- return;
- }
- setTimeout(k.bind(null, D, X));
- }
- const F = `lt${Date.now()}${this.loadTestFontId++}`;
- let x = this._loadTestFont;
- x = T(x, 976, F.length, F);
- const p = 16, E = 1482184792;
- let $ = r(x, p);
- for (S = 0, w = F.length - 3; S < w; S += 4)
- $ = $ - E + r(F, S) | 0;
- S < F.length && ($ = $ - E + r(F + "XXX", S) | 0), x = T(x, p, 4, (0, n.string32)($));
- const M = `url(data:font/opentype;base64,${btoa(x)});`, m = `@font-face {font-family:"${F}";src:${M}}`;
- this.insertRule(m);
- const N = this._document.createElement("div");
- N.style.visibility = "hidden", N.style.width = N.style.height = "10px", N.style.position = "absolute", N.style.top = N.style.left = "0px";
- for (const D of [c.loadedName, F]) {
- const X = this._document.createElement("span");
- X.textContent = "Hi", X.style.fontFamily = D, N.append(X);
- }
- this._document.body.append(N), k(F, () => {
- N.remove(), o.complete();
- });
- }
- }
- h = new WeakMap(), e.FontLoader = s;
- class l {
- constructor(c, {
- isEvalSupported: o = !0,
- disableFontFace: r = !1,
- ignoreErrors: T = !1,
- inspectFont: S = null
- }) {
- this.compiledGlyphs = /* @__PURE__ */ Object.create(null);
- for (const w in c)
- this[w] = c[w];
- this.isEvalSupported = o !== !1, this.disableFontFace = r === !0, this.ignoreErrors = T === !0, this._inspectFont = S;
- }
- createNativeFontFace() {
- var o;
- if (!this.data || this.disableFontFace)
- return null;
- let c;
- if (!this.cssFontInfo)
- c = new FontFace(this.loadedName, this.data, {});
- else {
- const r = {
- weight: this.cssFontInfo.fontWeight
- };
- this.cssFontInfo.italicAngle && (r.style = `oblique ${this.cssFontInfo.italicAngle}deg`), c = new FontFace(this.cssFontInfo.fontFamily, this.data, r);
- }
- return (o = this._inspectFont) == null || o.call(this, this), c;
- }
- createFontFaceRule() {
- var T;
- if (!this.data || this.disableFontFace)
- return null;
- const c = (0, n.bytesToString)(this.data), o = `url(data:${this.mimetype};base64,${btoa(c)});`;
- let r;
- if (!this.cssFontInfo)
- r = `@font-face {font-family:"${this.loadedName}";src:${o}}`;
- else {
- let S = `font-weight: ${this.cssFontInfo.fontWeight};`;
- this.cssFontInfo.italicAngle && (S += `font-style: oblique ${this.cssFontInfo.italicAngle}deg;`), r = `@font-face {font-family:"${this.cssFontInfo.fontFamily}";${S}src:${o}}`;
- }
- return (T = this._inspectFont) == null || T.call(this, this, o), r;
- }
- getPathGenerator(c, o) {
- if (this.compiledGlyphs[o] !== void 0)
- return this.compiledGlyphs[o];
- let r;
- try {
- r = c.get(this.loadedName + "_path_" + o);
- } catch (T) {
- if (!this.ignoreErrors)
- throw T;
- return (0, n.warn)(`getPathGenerator - ignoring character: "${T}".`), this.compiledGlyphs[o] = function(S, w) {
- };
- }
- if (this.isEvalSupported && n.FeatureTest.isEvalSupported) {
- const T = [];
- for (const S of r) {
- const w = S.args !== void 0 ? S.args.join(",") : "";
- T.push("c.", S.cmd, "(", w, `);
-`);
- }
- return this.compiledGlyphs[o] = new Function("c", "size", T.join(""));
- }
- return this.compiledGlyphs[o] = function(T, S) {
- for (const w of r)
- w.cmd === "scale" && (w.args = [S, -S]), T[w.cmd].apply(T, w.args);
- };
- }
- }
- e.FontFaceObject = l;
- },
- /* 10 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.NodeStandardFontDataFactory = e.NodeFilterFactory = e.NodeCanvasFactory = e.NodeCMapReaderFactory = void 0;
- var n = i(7);
- i(1);
- const s = function(o) {
- return new Promise((r, T) => {
- require$$5.readFile(o, (w, C) => {
- if (w || !C) {
- T(new Error(w));
- return;
- }
- r(new Uint8Array(C));
- });
- });
- };
- class l extends n.BaseFilterFactory {
- }
- e.NodeFilterFactory = l;
- class h extends n.BaseCanvasFactory {
- _createCanvas(r, T) {
- return require$$5.createCanvas(r, T);
- }
- }
- e.NodeCanvasFactory = h;
- class _ extends n.BaseCMapReaderFactory {
- _fetchData(r, T) {
- return s(r).then((S) => ({
- cMapData: S,
- compressionType: T
- }));
- }
- }
- e.NodeCMapReaderFactory = _;
- class c extends n.BaseStandardFontDataFactory {
- _fetchData(r) {
- return s(r);
- }
- }
- e.NodeStandardFontDataFactory = c;
- },
- /* 11 */
- /***/
- (t, e, i) => {
- var q, Zt, pe, en;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.CanvasGraphics = void 0;
- var n = i(1), s = i(6), l = i(12), h = i(13);
- const _ = 16, c = 100, o = 4096, r = 15, T = 10, S = 1e3, w = 16;
- function C(R, d) {
- if (R._removeMirroring)
- throw new Error("Context is already forwarding operations.");
- R.__originalSave = R.save, R.__originalRestore = R.restore, R.__originalRotate = R.rotate, R.__originalScale = R.scale, R.__originalTranslate = R.translate, R.__originalTransform = R.transform, R.__originalSetTransform = R.setTransform, R.__originalResetTransform = R.resetTransform, R.__originalClip = R.clip, R.__originalMoveTo = R.moveTo, R.__originalLineTo = R.lineTo, R.__originalBezierCurveTo = R.bezierCurveTo, R.__originalRect = R.rect, R.__originalClosePath = R.closePath, R.__originalBeginPath = R.beginPath, R._removeMirroring = () => {
- R.save = R.__originalSave, R.restore = R.__originalRestore, R.rotate = R.__originalRotate, R.scale = R.__originalScale, R.translate = R.__originalTranslate, R.transform = R.__originalTransform, R.setTransform = R.__originalSetTransform, R.resetTransform = R.__originalResetTransform, R.clip = R.__originalClip, R.moveTo = R.__originalMoveTo, R.lineTo = R.__originalLineTo, R.bezierCurveTo = R.__originalBezierCurveTo, R.rect = R.__originalRect, R.closePath = R.__originalClosePath, R.beginPath = R.__originalBeginPath, delete R._removeMirroring;
- }, R.save = function() {
- d.save(), this.__originalSave();
- }, R.restore = function() {
- d.restore(), this.__originalRestore();
- }, R.translate = function(f, v) {
- d.translate(f, v), this.__originalTranslate(f, v);
- }, R.scale = function(f, v) {
- d.scale(f, v), this.__originalScale(f, v);
- }, R.transform = function(f, v, A, O, H, z) {
- d.transform(f, v, A, O, H, z), this.__originalTransform(f, v, A, O, H, z);
- }, R.setTransform = function(f, v, A, O, H, z) {
- d.setTransform(f, v, A, O, H, z), this.__originalSetTransform(f, v, A, O, H, z);
- }, R.resetTransform = function() {
- d.resetTransform(), this.__originalResetTransform();
- }, R.rotate = function(f) {
- d.rotate(f), this.__originalRotate(f);
- }, R.clip = function(f) {
- d.clip(f), this.__originalClip(f);
- }, R.moveTo = function(g, f) {
- d.moveTo(g, f), this.__originalMoveTo(g, f);
- }, R.lineTo = function(g, f) {
- d.lineTo(g, f), this.__originalLineTo(g, f);
- }, R.bezierCurveTo = function(g, f, v, A, O, H) {
- d.bezierCurveTo(g, f, v, A, O, H), this.__originalBezierCurveTo(g, f, v, A, O, H);
- }, R.rect = function(g, f, v, A) {
- d.rect(g, f, v, A), this.__originalRect(g, f, v, A);
- }, R.closePath = function() {
- d.closePath(), this.__originalClosePath();
- }, R.beginPath = function() {
- d.beginPath(), this.__originalBeginPath();
- };
- }
- class P {
- constructor(d) {
- this.canvasFactory = d, this.cache = /* @__PURE__ */ Object.create(null);
- }
- getCanvas(d, g, f) {
- let v;
- return this.cache[d] !== void 0 ? (v = this.cache[d], this.canvasFactory.reset(v, g, f)) : (v = this.canvasFactory.create(g, f), this.cache[d] = v), v;
- }
- delete(d) {
- delete this.cache[d];
- }
- clear() {
- for (const d in this.cache) {
- const g = this.cache[d];
- this.canvasFactory.destroy(g), delete this.cache[d];
- }
- }
- }
- function b(R, d, g, f, v, A, O, H, z, ae) {
- const [Q, ce, ue, me, fe, Pe] = (0, s.getCurrentTransform)(R);
- if (ce === 0 && ue === 0) {
- const De = O * Q + fe, _e = Math.round(De), ie = H * me + Pe, se = Math.round(ie), ge = (O + z) * Q + fe, Ce = Math.abs(Math.round(ge) - _e) || 1, xe = (H + ae) * me + Pe, Ue = Math.abs(Math.round(xe) - se) || 1;
- return R.setTransform(Math.sign(Q), 0, 0, Math.sign(me), _e, se), R.drawImage(d, g, f, v, A, 0, 0, Ce, Ue), R.setTransform(Q, ce, ue, me, fe, Pe), [Ce, Ue];
- }
- if (Q === 0 && me === 0) {
- const De = H * ue + fe, _e = Math.round(De), ie = O * ce + Pe, se = Math.round(ie), ge = (H + ae) * ue + fe, Ce = Math.abs(Math.round(ge) - _e) || 1, xe = (O + z) * ce + Pe, Ue = Math.abs(Math.round(xe) - se) || 1;
- return R.setTransform(0, Math.sign(ce), Math.sign(ue), 0, _e, se), R.drawImage(d, g, f, v, A, 0, 0, Ue, Ce), R.setTransform(Q, ce, ue, me, fe, Pe), [Ue, Ce];
- }
- R.drawImage(d, g, f, v, A, O, H, z, ae);
- const Fe = Math.hypot(Q, ce), Ee = Math.hypot(ue, me);
- return [Fe * z, Ee * ae];
- }
- function k(R) {
- const {
- width: d,
- height: g
- } = R;
- if (d > S || g > S)
- return null;
- const f = 1e3, v = new Uint8Array([0, 2, 4, 0, 1, 0, 5, 4, 8, 10, 0, 8, 0, 2, 1, 0]), A = d + 1;
- let O = new Uint8Array(A * (g + 1)), H, z, ae;
- const Q = d + 7 & -8;
- let ce = new Uint8Array(Q * g), ue = 0;
- for (const Ee of R.data) {
- let De = 128;
- for (; De > 0; )
- ce[ue++] = Ee & De ? 0 : 255, De >>= 1;
- }
- let me = 0;
- for (ue = 0, ce[ue] !== 0 && (O[0] = 1, ++me), z = 1; z < d; z++)
- ce[ue] !== ce[ue + 1] && (O[z] = ce[ue] ? 2 : 1, ++me), ue++;
- for (ce[ue] !== 0 && (O[z] = 2, ++me), H = 1; H < g; H++) {
- ue = H * Q, ae = H * A, ce[ue - Q] !== ce[ue] && (O[ae] = ce[ue] ? 1 : 8, ++me);
- let Ee = (ce[ue] ? 4 : 0) + (ce[ue - Q] ? 8 : 0);
- for (z = 1; z < d; z++)
- Ee = (Ee >> 2) + (ce[ue + 1] ? 4 : 0) + (ce[ue - Q + 1] ? 8 : 0), v[Ee] && (O[ae + z] = v[Ee], ++me), ue++;
- if (ce[ue - Q] !== ce[ue] && (O[ae + z] = ce[ue] ? 2 : 4, ++me), me > f)
- return null;
- }
- for (ue = Q * (g - 1), ae = H * A, ce[ue] !== 0 && (O[ae] = 8, ++me), z = 1; z < d; z++)
- ce[ue] !== ce[ue + 1] && (O[ae + z] = ce[ue] ? 4 : 8, ++me), ue++;
- if (ce[ue] !== 0 && (O[ae + z] = 4, ++me), me > f)
- return null;
- const fe = new Int32Array([0, A, -1, 0, -A, 0, 0, 0, 1]), Pe = new Path2D();
- for (H = 0; me && H <= g; H++) {
- let Ee = H * A;
- const De = Ee + d;
- for (; Ee < De && !O[Ee]; )
- Ee++;
- if (Ee === De)
- continue;
- Pe.moveTo(Ee % A, H);
- const _e = Ee;
- let ie = O[Ee];
- do {
- const se = fe[ie];
- do
- Ee += se;
- while (!O[Ee]);
- const ge = O[Ee];
- ge !== 5 && ge !== 10 ? (ie = ge, O[Ee] = 0) : (ie = ge & 51 * ie >> 4, O[Ee] &= ie >> 2 | ie << 2), Pe.lineTo(Ee % A, Ee / A | 0), O[Ee] || --me;
- } while (_e !== Ee);
- --H;
- }
- return ce = null, O = null, function(Ee) {
- Ee.save(), Ee.scale(1 / d, -1 / g), Ee.translate(0, -g), Ee.fill(Pe), Ee.beginPath(), Ee.restore();
- };
- }
- class F {
- constructor(d, g) {
- this.alphaIsShape = !1, this.fontSize = 0, this.fontSizeScale = 1, this.textMatrix = n.IDENTITY_MATRIX, this.textMatrixScale = 1, this.fontMatrix = n.FONT_IDENTITY_MATRIX, this.leading = 0, this.x = 0, this.y = 0, this.lineX = 0, this.lineY = 0, this.charSpacing = 0, this.wordSpacing = 0, this.textHScale = 1, this.textRenderingMode = n.TextRenderingMode.FILL, this.textRise = 0, this.fillColor = "#000000", this.strokeColor = "#000000", this.patternFill = !1, this.fillAlpha = 1, this.strokeAlpha = 1, this.lineWidth = 1, this.activeSMask = null, this.transferMaps = "none", this.startNewPathAndClipBox([0, 0, d, g]);
- }
- clone() {
- const d = Object.create(this);
- return d.clipBox = this.clipBox.slice(), d;
- }
- setCurrentPoint(d, g) {
- this.x = d, this.y = g;
- }
- updatePathMinMax(d, g, f) {
- [g, f] = n.Util.applyTransform([g, f], d), this.minX = Math.min(this.minX, g), this.minY = Math.min(this.minY, f), this.maxX = Math.max(this.maxX, g), this.maxY = Math.max(this.maxY, f);
- }
- updateRectMinMax(d, g) {
- const f = n.Util.applyTransform(g, d), v = n.Util.applyTransform(g.slice(2), d);
- this.minX = Math.min(this.minX, f[0], v[0]), this.minY = Math.min(this.minY, f[1], v[1]), this.maxX = Math.max(this.maxX, f[0], v[0]), this.maxY = Math.max(this.maxY, f[1], v[1]);
- }
- updateScalingPathMinMax(d, g) {
- n.Util.scaleMinMax(d, g), this.minX = Math.min(this.minX, g[0]), this.maxX = Math.max(this.maxX, g[1]), this.minY = Math.min(this.minY, g[2]), this.maxY = Math.max(this.maxY, g[3]);
- }
- updateCurvePathMinMax(d, g, f, v, A, O, H, z, ae, Q) {
- const ce = n.Util.bezierBoundingBox(g, f, v, A, O, H, z, ae);
- if (Q) {
- Q[0] = Math.min(Q[0], ce[0], ce[2]), Q[1] = Math.max(Q[1], ce[0], ce[2]), Q[2] = Math.min(Q[2], ce[1], ce[3]), Q[3] = Math.max(Q[3], ce[1], ce[3]);
- return;
- }
- this.updateRectMinMax(d, ce);
- }
- getPathBoundingBox(d = l.PathType.FILL, g = null) {
- const f = [this.minX, this.minY, this.maxX, this.maxY];
- if (d === l.PathType.STROKE) {
- g || (0, n.unreachable)("Stroke bounding box must include transform.");
- const v = n.Util.singularValueDecompose2dScale(g), A = v[0] * this.lineWidth / 2, O = v[1] * this.lineWidth / 2;
- f[0] -= A, f[1] -= O, f[2] += A, f[3] += O;
- }
- return f;
- }
- updateClipFromPath() {
- const d = n.Util.intersect(this.clipBox, this.getPathBoundingBox());
- this.startNewPathAndClipBox(d || [0, 0, 0, 0]);
- }
- isEmptyClip() {
- return this.minX === 1 / 0;
- }
- startNewPathAndClipBox(d) {
- this.clipBox = d, this.minX = 1 / 0, this.minY = 1 / 0, this.maxX = 0, this.maxY = 0;
- }
- getClippedPathBoundingBox(d = l.PathType.FILL, g = null) {
- return n.Util.intersect(this.clipBox, this.getPathBoundingBox(d, g));
- }
- }
- function x(R, d) {
- if (typeof ImageData < "u" && d instanceof ImageData) {
- R.putImageData(d, 0, 0);
- return;
- }
- const g = d.height, f = d.width, v = g % w, A = (g - v) / w, O = v === 0 ? A : A + 1, H = R.createImageData(f, w);
- let z = 0, ae;
- const Q = d.data, ce = H.data;
- let ue, me, fe, Pe;
- if (d.kind === n.ImageKind.GRAYSCALE_1BPP) {
- const Fe = Q.byteLength, Ee = new Uint32Array(ce.buffer, 0, ce.byteLength >> 2), De = Ee.length, _e = f + 7 >> 3, ie = 4294967295, se = n.FeatureTest.isLittleEndian ? 4278190080 : 255;
- for (ue = 0; ue < O; ue++) {
- for (fe = ue < A ? w : v, ae = 0, me = 0; me < fe; me++) {
- const ge = Fe - z;
- let Ce = 0;
- const xe = ge > _e ? f : ge * 8 - 7, Ue = xe & -8;
- let We = 0, je = 0;
- for (; Ce < Ue; Ce += 8)
- je = Q[z++], Ee[ae++] = je & 128 ? ie : se, Ee[ae++] = je & 64 ? ie : se, Ee[ae++] = je & 32 ? ie : se, Ee[ae++] = je & 16 ? ie : se, Ee[ae++] = je & 8 ? ie : se, Ee[ae++] = je & 4 ? ie : se, Ee[ae++] = je & 2 ? ie : se, Ee[ae++] = je & 1 ? ie : se;
- for (; Ce < xe; Ce++)
- We === 0 && (je = Q[z++], We = 128), Ee[ae++] = je & We ? ie : se, We >>= 1;
- }
- for (; ae < De; )
- Ee[ae++] = 0;
- R.putImageData(H, 0, ue * w);
- }
- } else if (d.kind === n.ImageKind.RGBA_32BPP) {
- for (me = 0, Pe = f * w * 4, ue = 0; ue < A; ue++)
- ce.set(Q.subarray(z, z + Pe)), z += Pe, R.putImageData(H, 0, me), me += w;
- ue < O && (Pe = f * v * 4, ce.set(Q.subarray(z, z + Pe)), R.putImageData(H, 0, me));
- } else if (d.kind === n.ImageKind.RGB_24BPP)
- for (fe = w, Pe = f * fe, ue = 0; ue < O; ue++) {
- for (ue >= A && (fe = v, Pe = f * fe), ae = 0, me = Pe; me--; )
- ce[ae++] = Q[z++], ce[ae++] = Q[z++], ce[ae++] = Q[z++], ce[ae++] = 255;
- R.putImageData(H, 0, ue * w);
- }
- else
- throw new Error(`bad image kind: ${d.kind}`);
- }
- function y(R, d) {
- if (d.bitmap) {
- R.drawImage(d.bitmap, 0, 0);
- return;
- }
- const g = d.height, f = d.width, v = g % w, A = (g - v) / w, O = v === 0 ? A : A + 1, H = R.createImageData(f, w);
- let z = 0;
- const ae = d.data, Q = H.data;
- for (let ce = 0; ce < O; ce++) {
- const ue = ce < A ? w : v;
- ({
- srcPos: z
- } = (0, h.convertBlackAndWhiteToRGBA)({
- src: ae,
- srcPos: z,
- dest: Q,
- width: f,
- height: ue,
- nonBlackColor: 0
- })), R.putImageData(H, 0, ce * w);
- }
- }
- function p(R, d) {
- const g = ["strokeStyle", "fillStyle", "fillRule", "globalAlpha", "lineWidth", "lineCap", "lineJoin", "miterLimit", "globalCompositeOperation", "font", "filter"];
- for (const f of g)
- R[f] !== void 0 && (d[f] = R[f]);
- R.setLineDash !== void 0 && (d.setLineDash(R.getLineDash()), d.lineDashOffset = R.lineDashOffset);
- }
- function E(R) {
- if (R.strokeStyle = R.fillStyle = "#000000", R.fillRule = "nonzero", R.globalAlpha = 1, R.lineWidth = 1, R.lineCap = "butt", R.lineJoin = "miter", R.miterLimit = 10, R.globalCompositeOperation = "source-over", R.font = "10px sans-serif", R.setLineDash !== void 0 && (R.setLineDash([]), R.lineDashOffset = 0), !n.isNodeJS) {
- const {
- filter: d
- } = R;
- d !== "none" && d !== "" && (R.filter = "none");
- }
- }
- function $(R, d, g, f) {
- const v = R.length;
- for (let A = 3; A < v; A += 4) {
- const O = R[A];
- if (O === 0)
- R[A - 3] = d, R[A - 2] = g, R[A - 1] = f;
- else if (O < 255) {
- const H = 255 - O;
- R[A - 3] = R[A - 3] * O + d * H >> 8, R[A - 2] = R[A - 2] * O + g * H >> 8, R[A - 1] = R[A - 1] * O + f * H >> 8;
- }
- }
- }
- function M(R, d, g) {
- const f = R.length, v = 1 / 255;
- for (let A = 3; A < f; A += 4) {
- const O = g ? g[R[A]] : R[A];
- d[A] = d[A] * O * v | 0;
- }
- }
- function m(R, d, g) {
- const f = R.length;
- for (let v = 3; v < f; v += 4) {
- const A = R[v - 3] * 77 + R[v - 2] * 152 + R[v - 1] * 28;
- d[v] = g ? d[v] * g[A >> 8] >> 8 : d[v] * A >> 16;
- }
- }
- function N(R, d, g, f, v, A, O, H, z, ae, Q) {
- const ce = !!A, ue = ce ? A[0] : 0, me = ce ? A[1] : 0, fe = ce ? A[2] : 0, Pe = v === "Luminosity" ? m : M, Ee = Math.min(f, Math.ceil(1048576 / g));
- for (let De = 0; De < f; De += Ee) {
- const _e = Math.min(Ee, f - De), ie = R.getImageData(H - ae, De + (z - Q), g, _e), se = d.getImageData(H, De + z, g, _e);
- ce && $(ie.data, ue, me, fe), Pe(ie.data, se.data, O), d.putImageData(se, H, De + z);
- }
- }
- function D(R, d, g, f) {
- const v = f[0], A = f[1], O = f[2] - v, H = f[3] - A;
- O === 0 || H === 0 || (N(d.context, g, O, H, d.subtype, d.backdrop, d.transferMap, v, A, d.offsetX, d.offsetY), R.save(), R.globalAlpha = 1, R.globalCompositeOperation = "source-over", R.setTransform(1, 0, 0, 1, 0, 0), R.drawImage(g.canvas, 0, 0), R.restore());
- }
- function X(R, d) {
- const g = n.Util.singularValueDecompose2dScale(R);
- g[0] = Math.fround(g[0]), g[1] = Math.fround(g[1]);
- const f = Math.fround((globalThis.devicePixelRatio || 1) * s.PixelsPerInch.PDF_TO_CSS_UNITS);
- return d !== void 0 ? d : g[0] <= f || g[1] <= f;
- }
- const G = ["butt", "round", "square"], I = ["miter", "round", "bevel"], B = {}, ee = {}, be = class be {
- constructor(d, g, f, v, A, {
- optionalContentConfig: O,
- markedContentStack: H = null
- }, z, ae) {
- W(this, q);
- W(this, pe);
- this.ctx = d, this.current = new F(this.ctx.canvas.width, this.ctx.canvas.height), this.stateStack = [], this.pendingClip = null, this.pendingEOFill = !1, this.res = null, this.xobjs = null, this.commonObjs = g, this.objs = f, this.canvasFactory = v, this.filterFactory = A, this.groupStack = [], this.processingType3 = null, this.baseTransform = null, this.baseTransformStack = [], this.groupLevel = 0, this.smaskStack = [], this.smaskCounter = 0, this.tempSMask = null, this.suspendedCtx = null, this.contentVisible = !0, this.markedContentStack = H || [], this.optionalContentConfig = O, this.cachedCanvases = new P(this.canvasFactory), this.cachedPatterns = /* @__PURE__ */ new Map(), this.annotationCanvasMap = z, this.viewportScale = 1, this.outputScaleX = 1, this.outputScaleY = 1, this.pageColors = ae, this._cachedScaleForStroking = [-1, 0], this._cachedGetSinglePixelWidth = null, this._cachedBitmapsMap = /* @__PURE__ */ new Map();
- }
- getObject(d, g = null) {
- return typeof d == "string" ? d.startsWith("g_") ? this.commonObjs.get(d) : this.objs.get(d) : g;
- }
- beginDrawing({
- transform: d,
- viewport: g,
- transparency: f = !1,
- background: v = null
- }) {
- const A = this.ctx.canvas.width, O = this.ctx.canvas.height, H = this.ctx.fillStyle;
- if (this.ctx.fillStyle = v || "#ffffff", this.ctx.fillRect(0, 0, A, O), this.ctx.fillStyle = H, f) {
- const z = this.cachedCanvases.getCanvas("transparent", A, O);
- this.compositeCtx = this.ctx, this.transparentCanvas = z.canvas, this.ctx = z.context, this.ctx.save(), this.ctx.transform(...(0, s.getCurrentTransform)(this.compositeCtx));
- }
- this.ctx.save(), E(this.ctx), d && (this.ctx.transform(...d), this.outputScaleX = d[0], this.outputScaleY = d[0]), this.ctx.transform(...g.transform), this.viewportScale = g.scale, this.baseTransform = (0, s.getCurrentTransform)(this.ctx);
- }
- executeOperatorList(d, g, f, v) {
- const A = d.argsArray, O = d.fnArray;
- let H = g || 0;
- const z = A.length;
- if (z === H)
- return H;
- const ae = z - H > T && typeof f == "function", Q = ae ? Date.now() + r : 0;
- let ce = 0;
- const ue = this.commonObjs, me = this.objs;
- let fe;
- for (; ; ) {
- if (v !== void 0 && H === v.nextBreakPoint)
- return v.breakIt(H, f), H;
- if (fe = O[H], fe !== n.OPS.dependency)
- this[fe].apply(this, A[H]);
- else
- for (const Pe of A[H]) {
- const Fe = Pe.startsWith("g_") ? ue : me;
- if (!Fe.has(Pe))
- return Fe.get(Pe, f), H;
- }
- if (H++, H === z)
- return H;
- if (ae && ++ce > T) {
- if (Date.now() > Q)
- return f(), H;
- ce = 0;
- }
- }
- }
- endDrawing() {
- K(this, q, Zt).call(this), this.cachedCanvases.clear(), this.cachedPatterns.clear();
- for (const d of this._cachedBitmapsMap.values()) {
- for (const g of d.values())
- typeof HTMLCanvasElement < "u" && g instanceof HTMLCanvasElement && (g.width = g.height = 0);
- d.clear();
- }
- this._cachedBitmapsMap.clear(), K(this, pe, en).call(this);
- }
- _scaleImage(d, g) {
- const f = d.width, v = d.height;
- let A = Math.max(Math.hypot(g[0], g[1]), 1), O = Math.max(Math.hypot(g[2], g[3]), 1), H = f, z = v, ae = "prescale1", Q, ce;
- for (; A > 2 && H > 1 || O > 2 && z > 1; ) {
- let ue = H, me = z;
- A > 2 && H > 1 && (ue = H >= 16384 ? Math.floor(H / 2) - 1 || 1 : Math.ceil(H / 2), A /= H / ue), O > 2 && z > 1 && (me = z >= 16384 ? Math.floor(z / 2) - 1 || 1 : Math.ceil(z) / 2, O /= z / me), Q = this.cachedCanvases.getCanvas(ae, ue, me), ce = Q.context, ce.clearRect(0, 0, ue, me), ce.drawImage(d, 0, 0, H, z, 0, 0, ue, me), d = Q.canvas, H = ue, z = me, ae = ae === "prescale1" ? "prescale2" : "prescale1";
- }
- return {
- img: d,
- paintWidth: H,
- paintHeight: z
- };
- }
- _createMaskCanvas(d) {
- const g = this.ctx, {
- width: f,
- height: v
- } = d, A = this.current.fillColor, O = this.current.patternFill, H = (0, s.getCurrentTransform)(g);
- let z, ae, Q, ce;
- if ((d.bitmap || d.data) && d.count > 1) {
- const Ce = d.bitmap || d.data.buffer;
- ae = JSON.stringify(O ? H : [H.slice(0, 4), A]), z = this._cachedBitmapsMap.get(Ce), z || (z = /* @__PURE__ */ new Map(), this._cachedBitmapsMap.set(Ce, z));
- const xe = z.get(ae);
- if (xe && !O) {
- const Ue = Math.round(Math.min(H[0], H[2]) + H[4]), We = Math.round(Math.min(H[1], H[3]) + H[5]);
- return {
- canvas: xe,
- offsetX: Ue,
- offsetY: We
- };
- }
- Q = xe;
- }
- Q || (ce = this.cachedCanvases.getCanvas("maskCanvas", f, v), y(ce.context, d));
- let ue = n.Util.transform(H, [1 / f, 0, 0, -1 / v, 0, 0]);
- ue = n.Util.transform(ue, [1, 0, 0, 1, 0, -v]);
- const me = n.Util.applyTransform([0, 0], ue), fe = n.Util.applyTransform([f, v], ue), Pe = n.Util.normalizeRect([me[0], me[1], fe[0], fe[1]]), Fe = Math.round(Pe[2] - Pe[0]) || 1, Ee = Math.round(Pe[3] - Pe[1]) || 1, De = this.cachedCanvases.getCanvas("fillCanvas", Fe, Ee), _e = De.context, ie = Math.min(me[0], fe[0]), se = Math.min(me[1], fe[1]);
- _e.translate(-ie, -se), _e.transform(...ue), Q || (Q = this._scaleImage(ce.canvas, (0, s.getCurrentTransformInverse)(_e)), Q = Q.img, z && O && z.set(ae, Q)), _e.imageSmoothingEnabled = X((0, s.getCurrentTransform)(_e), d.interpolate), b(_e, Q, 0, 0, Q.width, Q.height, 0, 0, f, v), _e.globalCompositeOperation = "source-in";
- const ge = n.Util.transform((0, s.getCurrentTransformInverse)(_e), [1, 0, 0, 1, -ie, -se]);
- return _e.fillStyle = O ? A.getPattern(g, this, ge, l.PathType.FILL) : A, _e.fillRect(0, 0, f, v), z && !O && (this.cachedCanvases.delete("fillCanvas"), z.set(ae, De.canvas)), {
- canvas: De.canvas,
- offsetX: Math.round(ie),
- offsetY: Math.round(se)
- };
- }
- setLineWidth(d) {
- d !== this.current.lineWidth && (this._cachedScaleForStroking[0] = -1), this.current.lineWidth = d, this.ctx.lineWidth = d;
- }
- setLineCap(d) {
- this.ctx.lineCap = G[d];
- }
- setLineJoin(d) {
- this.ctx.lineJoin = I[d];
- }
- setMiterLimit(d) {
- this.ctx.miterLimit = d;
- }
- setDash(d, g) {
- const f = this.ctx;
- f.setLineDash !== void 0 && (f.setLineDash(d), f.lineDashOffset = g);
- }
- setRenderingIntent(d) {
- }
- setFlatness(d) {
- }
- setGState(d) {
- for (const [g, f] of d)
- switch (g) {
- case "LW":
- this.setLineWidth(f);
- break;
- case "LC":
- this.setLineCap(f);
- break;
- case "LJ":
- this.setLineJoin(f);
- break;
- case "ML":
- this.setMiterLimit(f);
- break;
- case "D":
- this.setDash(f[0], f[1]);
- break;
- case "RI":
- this.setRenderingIntent(f);
- break;
- case "FL":
- this.setFlatness(f);
- break;
- case "Font":
- this.setFont(f[0], f[1]);
- break;
- case "CA":
- this.current.strokeAlpha = f;
- break;
- case "ca":
- this.current.fillAlpha = f, this.ctx.globalAlpha = f;
- break;
- case "BM":
- this.ctx.globalCompositeOperation = f;
- break;
- case "SMask":
- this.current.activeSMask = f ? this.tempSMask : null, this.tempSMask = null, this.checkSMaskState();
- break;
- case "TR":
- this.ctx.filter = this.current.transferMaps = this.filterFactory.addFilter(f);
- break;
- }
- }
- get inSMaskMode() {
- return !!this.suspendedCtx;
- }
- checkSMaskState() {
- const d = this.inSMaskMode;
- this.current.activeSMask && !d ? this.beginSMaskMode() : !this.current.activeSMask && d && this.endSMaskMode();
- }
- beginSMaskMode() {
- if (this.inSMaskMode)
- throw new Error("beginSMaskMode called while already in smask mode");
- const d = this.ctx.canvas.width, g = this.ctx.canvas.height, f = "smaskGroupAt" + this.groupLevel, v = this.cachedCanvases.getCanvas(f, d, g);
- this.suspendedCtx = this.ctx, this.ctx = v.context;
- const A = this.ctx;
- A.setTransform(...(0, s.getCurrentTransform)(this.suspendedCtx)), p(this.suspendedCtx, A), C(A, this.suspendedCtx), this.setGState([["BM", "source-over"], ["ca", 1], ["CA", 1]]);
- }
- endSMaskMode() {
- if (!this.inSMaskMode)
- throw new Error("endSMaskMode called while not in smask mode");
- this.ctx._removeMirroring(), p(this.ctx, this.suspendedCtx), this.ctx = this.suspendedCtx, this.suspendedCtx = null;
- }
- compose(d) {
- if (!this.current.activeSMask)
- return;
- d ? (d[0] = Math.floor(d[0]), d[1] = Math.floor(d[1]), d[2] = Math.ceil(d[2]), d[3] = Math.ceil(d[3])) : d = [0, 0, this.ctx.canvas.width, this.ctx.canvas.height];
- const g = this.current.activeSMask, f = this.suspendedCtx;
- D(f, g, this.ctx, d), this.ctx.save(), this.ctx.setTransform(1, 0, 0, 1, 0, 0), this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height), this.ctx.restore();
- }
- save() {
- this.inSMaskMode ? (p(this.ctx, this.suspendedCtx), this.suspendedCtx.save()) : this.ctx.save();
- const d = this.current;
- this.stateStack.push(d), this.current = d.clone();
- }
- restore() {
- this.stateStack.length === 0 && this.inSMaskMode && this.endSMaskMode(), this.stateStack.length !== 0 && (this.current = this.stateStack.pop(), this.inSMaskMode ? (this.suspendedCtx.restore(), p(this.suspendedCtx, this.ctx)) : this.ctx.restore(), this.checkSMaskState(), this.pendingClip = null, this._cachedScaleForStroking[0] = -1, this._cachedGetSinglePixelWidth = null);
- }
- transform(d, g, f, v, A, O) {
- this.ctx.transform(d, g, f, v, A, O), this._cachedScaleForStroking[0] = -1, this._cachedGetSinglePixelWidth = null;
- }
- constructPath(d, g, f) {
- const v = this.ctx, A = this.current;
- let O = A.x, H = A.y, z, ae;
- const Q = (0, s.getCurrentTransform)(v), ce = Q[0] === 0 && Q[3] === 0 || Q[1] === 0 && Q[2] === 0, ue = ce ? f.slice(0) : null;
- for (let me = 0, fe = 0, Pe = d.length; me < Pe; me++)
- switch (d[me] | 0) {
- case n.OPS.rectangle:
- O = g[fe++], H = g[fe++];
- const Fe = g[fe++], Ee = g[fe++], De = O + Fe, _e = H + Ee;
- v.moveTo(O, H), Fe === 0 || Ee === 0 ? v.lineTo(De, _e) : (v.lineTo(De, H), v.lineTo(De, _e), v.lineTo(O, _e)), ce || A.updateRectMinMax(Q, [O, H, De, _e]), v.closePath();
- break;
- case n.OPS.moveTo:
- O = g[fe++], H = g[fe++], v.moveTo(O, H), ce || A.updatePathMinMax(Q, O, H);
- break;
- case n.OPS.lineTo:
- O = g[fe++], H = g[fe++], v.lineTo(O, H), ce || A.updatePathMinMax(Q, O, H);
- break;
- case n.OPS.curveTo:
- z = O, ae = H, O = g[fe + 4], H = g[fe + 5], v.bezierCurveTo(g[fe], g[fe + 1], g[fe + 2], g[fe + 3], O, H), A.updateCurvePathMinMax(Q, z, ae, g[fe], g[fe + 1], g[fe + 2], g[fe + 3], O, H, ue), fe += 6;
- break;
- case n.OPS.curveTo2:
- z = O, ae = H, v.bezierCurveTo(O, H, g[fe], g[fe + 1], g[fe + 2], g[fe + 3]), A.updateCurvePathMinMax(Q, z, ae, O, H, g[fe], g[fe + 1], g[fe + 2], g[fe + 3], ue), O = g[fe + 2], H = g[fe + 3], fe += 4;
- break;
- case n.OPS.curveTo3:
- z = O, ae = H, O = g[fe + 2], H = g[fe + 3], v.bezierCurveTo(g[fe], g[fe + 1], O, H, O, H), A.updateCurvePathMinMax(Q, z, ae, g[fe], g[fe + 1], O, H, O, H, ue), fe += 4;
- break;
- case n.OPS.closePath:
- v.closePath();
- break;
- }
- ce && A.updateScalingPathMinMax(Q, ue), A.setCurrentPoint(O, H);
- }
- closePath() {
- this.ctx.closePath();
- }
- stroke(d = !0) {
- const g = this.ctx, f = this.current.strokeColor;
- g.globalAlpha = this.current.strokeAlpha, this.contentVisible && (typeof f == "object" && (f != null && f.getPattern) ? (g.save(), g.strokeStyle = f.getPattern(g, this, (0, s.getCurrentTransformInverse)(g), l.PathType.STROKE), this.rescaleAndStroke(!1), g.restore()) : this.rescaleAndStroke(!0)), d && this.consumePath(this.current.getClippedPathBoundingBox()), g.globalAlpha = this.current.fillAlpha;
- }
- closeStroke() {
- this.closePath(), this.stroke();
- }
- fill(d = !0) {
- const g = this.ctx, f = this.current.fillColor, v = this.current.patternFill;
- let A = !1;
- v && (g.save(), g.fillStyle = f.getPattern(g, this, (0, s.getCurrentTransformInverse)(g), l.PathType.FILL), A = !0);
- const O = this.current.getClippedPathBoundingBox();
- this.contentVisible && O !== null && (this.pendingEOFill ? (g.fill("evenodd"), this.pendingEOFill = !1) : g.fill()), A && g.restore(), d && this.consumePath(O);
- }
- eoFill() {
- this.pendingEOFill = !0, this.fill();
- }
- fillStroke() {
- this.fill(!1), this.stroke(!1), this.consumePath();
- }
- eoFillStroke() {
- this.pendingEOFill = !0, this.fillStroke();
- }
- closeFillStroke() {
- this.closePath(), this.fillStroke();
- }
- closeEOFillStroke() {
- this.pendingEOFill = !0, this.closePath(), this.fillStroke();
- }
- endPath() {
- this.consumePath();
- }
- clip() {
- this.pendingClip = B;
- }
- eoClip() {
- this.pendingClip = ee;
- }
- beginText() {
- this.current.textMatrix = n.IDENTITY_MATRIX, this.current.textMatrixScale = 1, this.current.x = this.current.lineX = 0, this.current.y = this.current.lineY = 0;
- }
- endText() {
- const d = this.pendingTextPaths, g = this.ctx;
- if (d === void 0) {
- g.beginPath();
- return;
- }
- g.save(), g.beginPath();
- for (const f of d)
- g.setTransform(...f.transform), g.translate(f.x, f.y), f.addToPath(g, f.fontSize);
- g.restore(), g.clip(), g.beginPath(), delete this.pendingTextPaths;
- }
- setCharSpacing(d) {
- this.current.charSpacing = d;
- }
- setWordSpacing(d) {
- this.current.wordSpacing = d;
- }
- setHScale(d) {
- this.current.textHScale = d / 100;
- }
- setLeading(d) {
- this.current.leading = -d;
- }
- setFont(d, g) {
- var Q;
- const f = this.commonObjs.get(d), v = this.current;
- if (!f)
- throw new Error(`Can't find font for ${d}`);
- if (v.fontMatrix = f.fontMatrix || n.FONT_IDENTITY_MATRIX, (v.fontMatrix[0] === 0 || v.fontMatrix[3] === 0) && (0, n.warn)("Invalid font matrix for font " + d), g < 0 ? (g = -g, v.fontDirection = -1) : v.fontDirection = 1, this.current.font = f, this.current.fontSize = g, f.isType3Font)
- return;
- const A = f.loadedName || "sans-serif", O = ((Q = f.systemFontInfo) == null ? void 0 : Q.css) || `"${A}", ${f.fallbackName}`;
- let H = "normal";
- f.black ? H = "900" : f.bold && (H = "bold");
- const z = f.italic ? "italic" : "normal";
- let ae = g;
- g < _ ? ae = _ : g > c && (ae = c), this.current.fontSizeScale = g / ae, this.ctx.font = `${z} ${H} ${ae}px ${O}`;
- }
- setTextRenderingMode(d) {
- this.current.textRenderingMode = d;
- }
- setTextRise(d) {
- this.current.textRise = d;
- }
- moveText(d, g) {
- this.current.x = this.current.lineX += d, this.current.y = this.current.lineY += g;
- }
- setLeadingMoveText(d, g) {
- this.setLeading(-g), this.moveText(d, g);
- }
- setTextMatrix(d, g, f, v, A, O) {
- this.current.textMatrix = [d, g, f, v, A, O], this.current.textMatrixScale = Math.hypot(d, g), this.current.x = this.current.lineX = 0, this.current.y = this.current.lineY = 0;
- }
- nextLine() {
- this.moveText(0, this.current.leading);
- }
- paintChar(d, g, f, v) {
- const A = this.ctx, O = this.current, H = O.font, z = O.textRenderingMode, ae = O.fontSize / O.fontSizeScale, Q = z & n.TextRenderingMode.FILL_STROKE_MASK, ce = !!(z & n.TextRenderingMode.ADD_TO_PATH_FLAG), ue = O.patternFill && !H.missingFile;
- let me;
- (H.disableFontFace || ce || ue) && (me = H.getPathGenerator(this.commonObjs, d)), H.disableFontFace || ue ? (A.save(), A.translate(g, f), A.beginPath(), me(A, ae), v && A.setTransform(...v), (Q === n.TextRenderingMode.FILL || Q === n.TextRenderingMode.FILL_STROKE) && A.fill(), (Q === n.TextRenderingMode.STROKE || Q === n.TextRenderingMode.FILL_STROKE) && A.stroke(), A.restore()) : ((Q === n.TextRenderingMode.FILL || Q === n.TextRenderingMode.FILL_STROKE) && A.fillText(d, g, f), (Q === n.TextRenderingMode.STROKE || Q === n.TextRenderingMode.FILL_STROKE) && A.strokeText(d, g, f)), ce && (this.pendingTextPaths || (this.pendingTextPaths = [])).push({
- transform: (0, s.getCurrentTransform)(A),
- x: g,
- y: f,
- fontSize: ae,
- addToPath: me
- });
- }
- get isFontSubpixelAAEnabled() {
- const {
- context: d
- } = this.cachedCanvases.getCanvas("isFontSubpixelAAEnabled", 10, 10);
- d.scale(1.5, 1), d.fillText("I", 0, 10);
- const g = d.getImageData(0, 0, 10, 10).data;
- let f = !1;
- for (let v = 3; v < g.length; v += 4)
- if (g[v] > 0 && g[v] < 255) {
- f = !0;
- break;
- }
- return (0, n.shadow)(this, "isFontSubpixelAAEnabled", f);
- }
- showText(d) {
- const g = this.current, f = g.font;
- if (f.isType3Font)
- return this.showType3Text(d);
- const v = g.fontSize;
- if (v === 0)
- return;
- const A = this.ctx, O = g.fontSizeScale, H = g.charSpacing, z = g.wordSpacing, ae = g.fontDirection, Q = g.textHScale * ae, ce = d.length, ue = f.vertical, me = ue ? 1 : -1, fe = f.defaultVMetrics, Pe = v * g.fontMatrix[0], Fe = g.textRenderingMode === n.TextRenderingMode.FILL && !f.disableFontFace && !g.patternFill;
- A.save(), A.transform(...g.textMatrix), A.translate(g.x, g.y + g.textRise), ae > 0 ? A.scale(Q, -1) : A.scale(Q, 1);
- let Ee;
- if (g.patternFill) {
- A.save();
- const ge = g.fillColor.getPattern(A, this, (0, s.getCurrentTransformInverse)(A), l.PathType.FILL);
- Ee = (0, s.getCurrentTransform)(A), A.restore(), A.fillStyle = ge;
- }
- let De = g.lineWidth;
- const _e = g.textMatrixScale;
- if (_e === 0 || De === 0) {
- const ge = g.textRenderingMode & n.TextRenderingMode.FILL_STROKE_MASK;
- (ge === n.TextRenderingMode.STROKE || ge === n.TextRenderingMode.FILL_STROKE) && (De = this.getSinglePixelWidth());
- } else
- De /= _e;
- if (O !== 1 && (A.scale(O, O), De /= O), A.lineWidth = De, f.isInvalidPDFjsFont) {
- const ge = [];
- let Ce = 0;
- for (const xe of d)
- ge.push(xe.unicode), Ce += xe.width;
- A.fillText(ge.join(""), 0, 0), g.x += Ce * Pe * Q, A.restore(), this.compose();
- return;
- }
- let ie = 0, se;
- for (se = 0; se < ce; ++se) {
- const ge = d[se];
- if (typeof ge == "number") {
- ie += me * ge * v / 1e3;
- continue;
- }
- let Ce = !1;
- const xe = (ge.isSpace ? z : 0) + H, Ue = ge.fontChar, We = ge.accent;
- let je, ze, Xe = ge.width;
- if (ue) {
- const Ye = ge.vmetric || fe, de = -(ge.vmetric ? Ye[1] : Xe * 0.5) * Pe, ne = Ye[2] * Pe;
- Xe = Ye ? -Ye[0] : Xe, je = de / O, ze = (ie + ne) / O;
- } else
- je = ie / O, ze = 0;
- if (f.remeasure && Xe > 0) {
- const Ye = A.measureText(Ue).width * 1e3 / v * O;
- if (Xe < Ye && this.isFontSubpixelAAEnabled) {
- const de = Xe / Ye;
- Ce = !0, A.save(), A.scale(de, 1), je /= de;
- } else
- Xe !== Ye && (je += (Xe - Ye) / 2e3 * v / O);
- }
- if (this.contentVisible && (ge.isInFont || f.missingFile)) {
- if (Fe && !We)
- A.fillText(Ue, je, ze);
- else if (this.paintChar(Ue, je, ze, Ee), We) {
- const Ye = je + v * We.offset.x / O, de = ze - v * We.offset.y / O;
- this.paintChar(We.fontChar, Ye, de, Ee);
- }
- }
- const Ge = ue ? Xe * Pe - xe * ae : Xe * Pe + xe * ae;
- ie += Ge, Ce && A.restore();
- }
- ue ? g.y -= ie : g.x += ie * Q, A.restore(), this.compose();
- }
- showType3Text(d) {
- const g = this.ctx, f = this.current, v = f.font, A = f.fontSize, O = f.fontDirection, H = v.vertical ? 1 : -1, z = f.charSpacing, ae = f.wordSpacing, Q = f.textHScale * O, ce = f.fontMatrix || n.FONT_IDENTITY_MATRIX, ue = d.length, me = f.textRenderingMode === n.TextRenderingMode.INVISIBLE;
- let fe, Pe, Fe, Ee;
- if (!(me || A === 0)) {
- for (this._cachedScaleForStroking[0] = -1, this._cachedGetSinglePixelWidth = null, g.save(), g.transform(...f.textMatrix), g.translate(f.x, f.y), g.scale(Q, O), fe = 0; fe < ue; ++fe) {
- if (Pe = d[fe], typeof Pe == "number") {
- Ee = H * Pe * A / 1e3, this.ctx.translate(Ee, 0), f.x += Ee * Q;
- continue;
- }
- const De = (Pe.isSpace ? ae : 0) + z, _e = v.charProcOperatorList[Pe.operatorListId];
- if (!_e) {
- (0, n.warn)(`Type3 character "${Pe.operatorListId}" is not available.`);
- continue;
- }
- this.contentVisible && (this.processingType3 = Pe, this.save(), g.scale(A, A), g.transform(...ce), this.executeOperatorList(_e), this.restore()), Fe = n.Util.applyTransform([Pe.width, 0], ce)[0] * A + De, g.translate(Fe, 0), f.x += Fe * Q;
- }
- g.restore(), this.processingType3 = null;
- }
- }
- setCharWidth(d, g) {
- }
- setCharWidthAndBounds(d, g, f, v, A, O) {
- this.ctx.rect(f, v, A - f, O - v), this.ctx.clip(), this.endPath();
- }
- getColorN_Pattern(d) {
- let g;
- if (d[0] === "TilingPattern") {
- const f = d[1], v = this.baseTransform || (0, s.getCurrentTransform)(this.ctx), A = {
- createCanvasGraphics: (O) => new be(O, this.commonObjs, this.objs, this.canvasFactory, this.filterFactory, {
- optionalContentConfig: this.optionalContentConfig,
- markedContentStack: this.markedContentStack
- })
- };
- g = new l.TilingPattern(d, f, this.ctx, A, v);
- } else
- g = this._getPattern(d[1], d[2]);
- return g;
- }
- setStrokeColorN() {
- this.current.strokeColor = this.getColorN_Pattern(arguments);
- }
- setFillColorN() {
- this.current.fillColor = this.getColorN_Pattern(arguments), this.current.patternFill = !0;
- }
- setStrokeRGBColor(d, g, f) {
- const v = n.Util.makeHexColor(d, g, f);
- this.ctx.strokeStyle = v, this.current.strokeColor = v;
- }
- setFillRGBColor(d, g, f) {
- const v = n.Util.makeHexColor(d, g, f);
- this.ctx.fillStyle = v, this.current.fillColor = v, this.current.patternFill = !1;
- }
- _getPattern(d, g = null) {
- let f;
- return this.cachedPatterns.has(d) ? f = this.cachedPatterns.get(d) : (f = (0, l.getShadingPattern)(this.getObject(d)), this.cachedPatterns.set(d, f)), g && (f.matrix = g), f;
- }
- shadingFill(d) {
- if (!this.contentVisible)
- return;
- const g = this.ctx;
- this.save();
- const f = this._getPattern(d);
- g.fillStyle = f.getPattern(g, this, (0, s.getCurrentTransformInverse)(g), l.PathType.SHADING);
- const v = (0, s.getCurrentTransformInverse)(g);
- if (v) {
- const {
- width: A,
- height: O
- } = g.canvas, [H, z, ae, Q] = n.Util.getAxialAlignedBoundingBox([0, 0, A, O], v);
- this.ctx.fillRect(H, z, ae - H, Q - z);
- } else
- this.ctx.fillRect(-1e10, -1e10, 2e10, 2e10);
- this.compose(this.current.getClippedPathBoundingBox()), this.restore();
- }
- beginInlineImage() {
- (0, n.unreachable)("Should not call beginInlineImage");
- }
- beginImageData() {
- (0, n.unreachable)("Should not call beginImageData");
- }
- paintFormXObjectBegin(d, g) {
- if (this.contentVisible && (this.save(), this.baseTransformStack.push(this.baseTransform), Array.isArray(d) && d.length === 6 && this.transform(...d), this.baseTransform = (0, s.getCurrentTransform)(this.ctx), g)) {
- const f = g[2] - g[0], v = g[3] - g[1];
- this.ctx.rect(g[0], g[1], f, v), this.current.updateRectMinMax((0, s.getCurrentTransform)(this.ctx), g), this.clip(), this.endPath();
- }
- }
- paintFormXObjectEnd() {
- this.contentVisible && (this.restore(), this.baseTransform = this.baseTransformStack.pop());
- }
- beginGroup(d) {
- if (!this.contentVisible)
- return;
- this.save(), this.inSMaskMode && (this.endSMaskMode(), this.current.activeSMask = null);
- const g = this.ctx;
- d.isolated || (0, n.info)("TODO: Support non-isolated groups."), d.knockout && (0, n.warn)("Knockout groups not supported.");
- const f = (0, s.getCurrentTransform)(g);
- if (d.matrix && g.transform(...d.matrix), !d.bbox)
- throw new Error("Bounding box is required.");
- let v = n.Util.getAxialAlignedBoundingBox(d.bbox, (0, s.getCurrentTransform)(g));
- const A = [0, 0, g.canvas.width, g.canvas.height];
- v = n.Util.intersect(v, A) || [0, 0, 0, 0];
- const O = Math.floor(v[0]), H = Math.floor(v[1]);
- let z = Math.max(Math.ceil(v[2]) - O, 1), ae = Math.max(Math.ceil(v[3]) - H, 1), Q = 1, ce = 1;
- z > o && (Q = z / o, z = o), ae > o && (ce = ae / o, ae = o), this.current.startNewPathAndClipBox([0, 0, z, ae]);
- let ue = "groupAt" + this.groupLevel;
- d.smask && (ue += "_smask_" + this.smaskCounter++ % 2);
- const me = this.cachedCanvases.getCanvas(ue, z, ae), fe = me.context;
- fe.scale(1 / Q, 1 / ce), fe.translate(-O, -H), fe.transform(...f), d.smask ? this.smaskStack.push({
- canvas: me.canvas,
- context: fe,
- offsetX: O,
- offsetY: H,
- scaleX: Q,
- scaleY: ce,
- subtype: d.smask.subtype,
- backdrop: d.smask.backdrop,
- transferMap: d.smask.transferMap || null,
- startTransformInverse: null
- }) : (g.setTransform(1, 0, 0, 1, 0, 0), g.translate(O, H), g.scale(Q, ce), g.save()), p(g, fe), this.ctx = fe, this.setGState([["BM", "source-over"], ["ca", 1], ["CA", 1]]), this.groupStack.push(g), this.groupLevel++;
- }
- endGroup(d) {
- if (!this.contentVisible)
- return;
- this.groupLevel--;
- const g = this.ctx, f = this.groupStack.pop();
- if (this.ctx = f, this.ctx.imageSmoothingEnabled = !1, d.smask)
- this.tempSMask = this.smaskStack.pop(), this.restore();
- else {
- this.ctx.restore();
- const v = (0, s.getCurrentTransform)(this.ctx);
- this.restore(), this.ctx.save(), this.ctx.setTransform(...v);
- const A = n.Util.getAxialAlignedBoundingBox([0, 0, g.canvas.width, g.canvas.height], v);
- this.ctx.drawImage(g.canvas, 0, 0), this.ctx.restore(), this.compose(A);
- }
- }
- beginAnnotation(d, g, f, v, A) {
- if (K(this, q, Zt).call(this), E(this.ctx), this.ctx.save(), this.save(), this.baseTransform && this.ctx.setTransform(...this.baseTransform), Array.isArray(g) && g.length === 4) {
- const O = g[2] - g[0], H = g[3] - g[1];
- if (A && this.annotationCanvasMap) {
- f = f.slice(), f[4] -= g[0], f[5] -= g[1], g = g.slice(), g[0] = g[1] = 0, g[2] = O, g[3] = H;
- const [z, ae] = n.Util.singularValueDecompose2dScale((0, s.getCurrentTransform)(this.ctx)), {
- viewportScale: Q
- } = this, ce = Math.ceil(O * this.outputScaleX * Q), ue = Math.ceil(H * this.outputScaleY * Q);
- this.annotationCanvas = this.canvasFactory.create(ce, ue);
- const {
- canvas: me,
- context: fe
- } = this.annotationCanvas;
- this.annotationCanvasMap.set(d, me), this.annotationCanvas.savedCtx = this.ctx, this.ctx = fe, this.ctx.save(), this.ctx.setTransform(z, 0, 0, -ae, 0, H * ae), E(this.ctx);
- } else
- E(this.ctx), this.ctx.rect(g[0], g[1], O, H), this.ctx.clip(), this.endPath();
- }
- this.current = new F(this.ctx.canvas.width, this.ctx.canvas.height), this.transform(...f), this.transform(...v);
- }
- endAnnotation() {
- this.annotationCanvas && (this.ctx.restore(), K(this, pe, en).call(this), this.ctx = this.annotationCanvas.savedCtx, delete this.annotationCanvas.savedCtx, delete this.annotationCanvas);
- }
- paintImageMaskXObject(d) {
- if (!this.contentVisible)
- return;
- const g = d.count;
- d = this.getObject(d.data, d), d.count = g;
- const f = this.ctx, v = this.processingType3;
- if (v && (v.compiled === void 0 && (v.compiled = k(d)), v.compiled)) {
- v.compiled(f);
- return;
- }
- const A = this._createMaskCanvas(d), O = A.canvas;
- f.save(), f.setTransform(1, 0, 0, 1, 0, 0), f.drawImage(O, A.offsetX, A.offsetY), f.restore(), this.compose();
- }
- paintImageMaskXObjectRepeat(d, g, f = 0, v = 0, A, O) {
- if (!this.contentVisible)
- return;
- d = this.getObject(d.data, d);
- const H = this.ctx;
- H.save();
- const z = (0, s.getCurrentTransform)(H);
- H.transform(g, f, v, A, 0, 0);
- const ae = this._createMaskCanvas(d);
- H.setTransform(1, 0, 0, 1, ae.offsetX - z[4], ae.offsetY - z[5]);
- for (let Q = 0, ce = O.length; Q < ce; Q += 2) {
- const ue = n.Util.transform(z, [g, f, v, A, O[Q], O[Q + 1]]), [me, fe] = n.Util.applyTransform([0, 0], ue);
- H.drawImage(ae.canvas, me, fe);
- }
- H.restore(), this.compose();
- }
- paintImageMaskXObjectGroup(d) {
- if (!this.contentVisible)
- return;
- const g = this.ctx, f = this.current.fillColor, v = this.current.patternFill;
- for (const A of d) {
- const {
- data: O,
- width: H,
- height: z,
- transform: ae
- } = A, Q = this.cachedCanvases.getCanvas("maskCanvas", H, z), ce = Q.context;
- ce.save();
- const ue = this.getObject(O, A);
- y(ce, ue), ce.globalCompositeOperation = "source-in", ce.fillStyle = v ? f.getPattern(ce, this, (0, s.getCurrentTransformInverse)(g), l.PathType.FILL) : f, ce.fillRect(0, 0, H, z), ce.restore(), g.save(), g.transform(...ae), g.scale(1, -1), b(g, Q.canvas, 0, 0, H, z, 0, -1, 1, 1), g.restore();
- }
- this.compose();
- }
- paintImageXObject(d) {
- if (!this.contentVisible)
- return;
- const g = this.getObject(d);
- if (!g) {
- (0, n.warn)("Dependent image isn't ready yet");
- return;
- }
- this.paintInlineImageXObject(g);
- }
- paintImageXObjectRepeat(d, g, f, v) {
- if (!this.contentVisible)
- return;
- const A = this.getObject(d);
- if (!A) {
- (0, n.warn)("Dependent image isn't ready yet");
- return;
- }
- const O = A.width, H = A.height, z = [];
- for (let ae = 0, Q = v.length; ae < Q; ae += 2)
- z.push({
- transform: [g, 0, 0, f, v[ae], v[ae + 1]],
- x: 0,
- y: 0,
- w: O,
- h: H
- });
- this.paintInlineImageXObjectGroup(A, z);
- }
- applyTransferMapsToCanvas(d) {
- return this.current.transferMaps !== "none" && (d.filter = this.current.transferMaps, d.drawImage(d.canvas, 0, 0), d.filter = "none"), d.canvas;
- }
- applyTransferMapsToBitmap(d) {
- if (this.current.transferMaps === "none")
- return d.bitmap;
- const {
- bitmap: g,
- width: f,
- height: v
- } = d, A = this.cachedCanvases.getCanvas("inlineImage", f, v), O = A.context;
- return O.filter = this.current.transferMaps, O.drawImage(g, 0, 0), O.filter = "none", A.canvas;
- }
- paintInlineImageXObject(d) {
- if (!this.contentVisible)
- return;
- const g = d.width, f = d.height, v = this.ctx;
- if (this.save(), !n.isNodeJS) {
- const {
- filter: H
- } = v;
- H !== "none" && H !== "" && (v.filter = "none");
- }
- v.scale(1 / g, -1 / f);
- let A;
- if (d.bitmap)
- A = this.applyTransferMapsToBitmap(d);
- else if (typeof HTMLElement == "function" && d instanceof HTMLElement || !d.data)
- A = d;
- else {
- const z = this.cachedCanvases.getCanvas("inlineImage", g, f).context;
- x(z, d), A = this.applyTransferMapsToCanvas(z);
- }
- const O = this._scaleImage(A, (0, s.getCurrentTransformInverse)(v));
- v.imageSmoothingEnabled = X((0, s.getCurrentTransform)(v), d.interpolate), b(v, O.img, 0, 0, O.paintWidth, O.paintHeight, 0, -f, g, f), this.compose(), this.restore();
- }
- paintInlineImageXObjectGroup(d, g) {
- if (!this.contentVisible)
- return;
- const f = this.ctx;
- let v;
- if (d.bitmap)
- v = d.bitmap;
- else {
- const A = d.width, O = d.height, z = this.cachedCanvases.getCanvas("inlineImage", A, O).context;
- x(z, d), v = this.applyTransferMapsToCanvas(z);
- }
- for (const A of g)
- f.save(), f.transform(...A.transform), f.scale(1, -1), b(f, v, A.x, A.y, A.w, A.h, 0, -1, 1, 1), f.restore();
- this.compose();
- }
- paintSolidColorImageMask() {
- this.contentVisible && (this.ctx.fillRect(0, 0, 1, 1), this.compose());
- }
- markPoint(d) {
- }
- markPointProps(d, g) {
- }
- beginMarkedContent(d) {
- this.markedContentStack.push({
- visible: !0
- });
- }
- beginMarkedContentProps(d, g) {
- d === "OC" ? this.markedContentStack.push({
- visible: this.optionalContentConfig.isVisible(g)
- }) : this.markedContentStack.push({
- visible: !0
- }), this.contentVisible = this.isContentVisible();
- }
- endMarkedContent() {
- this.markedContentStack.pop(), this.contentVisible = this.isContentVisible();
- }
- beginCompat() {
- }
- endCompat() {
- }
- consumePath(d) {
- const g = this.current.isEmptyClip();
- this.pendingClip && this.current.updateClipFromPath(), this.pendingClip || this.compose(d);
- const f = this.ctx;
- this.pendingClip && (g || (this.pendingClip === ee ? f.clip("evenodd") : f.clip()), this.pendingClip = null), this.current.startNewPathAndClipBox(this.current.clipBox), f.beginPath();
- }
- getSinglePixelWidth() {
- if (!this._cachedGetSinglePixelWidth) {
- const d = (0, s.getCurrentTransform)(this.ctx);
- if (d[1] === 0 && d[2] === 0)
- this._cachedGetSinglePixelWidth = 1 / Math.min(Math.abs(d[0]), Math.abs(d[3]));
- else {
- const g = Math.abs(d[0] * d[3] - d[2] * d[1]), f = Math.hypot(d[0], d[2]), v = Math.hypot(d[1], d[3]);
- this._cachedGetSinglePixelWidth = Math.max(f, v) / g;
- }
- }
- return this._cachedGetSinglePixelWidth;
- }
- getScaleForStroking() {
- if (this._cachedScaleForStroking[0] === -1) {
- const {
- lineWidth: d
- } = this.current, {
- a: g,
- b: f,
- c: v,
- d: A
- } = this.ctx.getTransform();
- let O, H;
- if (f === 0 && v === 0) {
- const z = Math.abs(g), ae = Math.abs(A);
- if (z === ae)
- if (d === 0)
- O = H = 1 / z;
- else {
- const Q = z * d;
- O = H = Q < 1 ? 1 / Q : 1;
- }
- else if (d === 0)
- O = 1 / z, H = 1 / ae;
- else {
- const Q = z * d, ce = ae * d;
- O = Q < 1 ? 1 / Q : 1, H = ce < 1 ? 1 / ce : 1;
- }
- } else {
- const z = Math.abs(g * A - f * v), ae = Math.hypot(g, f), Q = Math.hypot(v, A);
- if (d === 0)
- O = Q / z, H = ae / z;
- else {
- const ce = d * z;
- O = Q > ce ? Q / ce : 1, H = ae > ce ? ae / ce : 1;
- }
- }
- this._cachedScaleForStroking[0] = O, this._cachedScaleForStroking[1] = H;
- }
- return this._cachedScaleForStroking;
- }
- rescaleAndStroke(d) {
- const {
- ctx: g
- } = this, {
- lineWidth: f
- } = this.current, [v, A] = this.getScaleForStroking();
- if (g.lineWidth = f || 1, v === 1 && A === 1) {
- g.stroke();
- return;
- }
- const O = g.getLineDash();
- if (d && g.save(), g.scale(v, A), O.length > 0) {
- const H = Math.max(v, A);
- g.setLineDash(O.map((z) => z / H)), g.lineDashOffset /= H;
- }
- g.stroke(), d && g.restore();
- }
- isContentVisible() {
- for (let d = this.markedContentStack.length - 1; d >= 0; d--)
- if (!this.markedContentStack[d].visible)
- return !1;
- return !0;
- }
- };
- q = new WeakSet(), Zt = function() {
- for (; this.stateStack.length || this.inSMaskMode; )
- this.restore();
- this.ctx.restore(), this.transparentCanvas && (this.ctx = this.compositeCtx, this.ctx.save(), this.ctx.setTransform(1, 0, 0, 1, 0, 0), this.ctx.drawImage(this.transparentCanvas, 0, 0), this.ctx.restore(), this.transparentCanvas = null);
- }, pe = new WeakSet(), en = function() {
- if (this.pageColors) {
- const d = this.filterFactory.addHCMFilter(this.pageColors.foreground, this.pageColors.background);
- if (d !== "none") {
- const g = this.ctx.filter;
- this.ctx.filter = d, this.ctx.drawImage(this.ctx.canvas, 0, 0), this.ctx.filter = g;
- }
- }
- };
- let Y = be;
- e.CanvasGraphics = Y;
- for (const R in n.OPS)
- Y.prototype[R] !== void 0 && (Y.prototype[n.OPS[R]] = Y.prototype[R]);
- },
- /* 12 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.TilingPattern = e.PathType = void 0, e.getShadingPattern = w;
- var n = i(1), s = i(6);
- const l = {
- FILL: "Fill",
- STROKE: "Stroke",
- SHADING: "Shading"
- };
- e.PathType = l;
- function h(k, F) {
- if (!F)
- return;
- const x = F[2] - F[0], y = F[3] - F[1], p = new Path2D();
- p.rect(F[0], F[1], x, y), k.clip(p);
- }
- class _ {
- constructor() {
- this.constructor === _ && (0, n.unreachable)("Cannot initialize BaseShadingPattern.");
- }
- getPattern() {
- (0, n.unreachable)("Abstract method `getPattern` called.");
- }
- }
- class c extends _ {
- constructor(F) {
- super(), this._type = F[1], this._bbox = F[2], this._colorStops = F[3], this._p0 = F[4], this._p1 = F[5], this._r0 = F[6], this._r1 = F[7], this.matrix = null;
- }
- _createGradient(F) {
- let x;
- this._type === "axial" ? x = F.createLinearGradient(this._p0[0], this._p0[1], this._p1[0], this._p1[1]) : this._type === "radial" && (x = F.createRadialGradient(this._p0[0], this._p0[1], this._r0, this._p1[0], this._p1[1], this._r1));
- for (const y of this._colorStops)
- x.addColorStop(y[0], y[1]);
- return x;
- }
- getPattern(F, x, y, p) {
- let E;
- if (p === l.STROKE || p === l.FILL) {
- const $ = x.current.getClippedPathBoundingBox(p, (0, s.getCurrentTransform)(F)) || [0, 0, 0, 0], M = Math.ceil($[2] - $[0]) || 1, m = Math.ceil($[3] - $[1]) || 1, N = x.cachedCanvases.getCanvas("pattern", M, m, !0), D = N.context;
- D.clearRect(0, 0, D.canvas.width, D.canvas.height), D.beginPath(), D.rect(0, 0, D.canvas.width, D.canvas.height), D.translate(-$[0], -$[1]), y = n.Util.transform(y, [1, 0, 0, 1, $[0], $[1]]), D.transform(...x.baseTransform), this.matrix && D.transform(...this.matrix), h(D, this._bbox), D.fillStyle = this._createGradient(D), D.fill(), E = F.createPattern(N.canvas, "no-repeat");
- const X = new DOMMatrix(y);
- E.setTransform(X);
- } else
- h(F, this._bbox), E = this._createGradient(F);
- return E;
- }
- }
- function o(k, F, x, y, p, E, $, M) {
- const m = F.coords, N = F.colors, D = k.data, X = k.width * 4;
- let G;
- m[x + 1] > m[y + 1] && (G = x, x = y, y = G, G = E, E = $, $ = G), m[y + 1] > m[p + 1] && (G = y, y = p, p = G, G = $, $ = M, M = G), m[x + 1] > m[y + 1] && (G = x, x = y, y = G, G = E, E = $, $ = G);
- const I = (m[x] + F.offsetX) * F.scaleX, B = (m[x + 1] + F.offsetY) * F.scaleY, ee = (m[y] + F.offsetX) * F.scaleX, Y = (m[y + 1] + F.offsetY) * F.scaleY, q = (m[p] + F.offsetX) * F.scaleX, le = (m[p + 1] + F.offsetY) * F.scaleY;
- if (B >= le)
- return;
- const pe = N[E], we = N[E + 1], be = N[E + 2], R = N[$], d = N[$ + 1], g = N[$ + 2], f = N[M], v = N[M + 1], A = N[M + 2], O = Math.round(B), H = Math.round(le);
- let z, ae, Q, ce, ue, me, fe, Pe;
- for (let Fe = O; Fe <= H; Fe++) {
- if (Fe < Y) {
- const se = Fe < B ? 0 : (B - Fe) / (B - Y);
- z = I - (I - ee) * se, ae = pe - (pe - R) * se, Q = we - (we - d) * se, ce = be - (be - g) * se;
- } else {
- let se;
- Fe > le ? se = 1 : Y === le ? se = 0 : se = (Y - Fe) / (Y - le), z = ee - (ee - q) * se, ae = R - (R - f) * se, Q = d - (d - v) * se, ce = g - (g - A) * se;
- }
- let Ee;
- Fe < B ? Ee = 0 : Fe > le ? Ee = 1 : Ee = (B - Fe) / (B - le), ue = I - (I - q) * Ee, me = pe - (pe - f) * Ee, fe = we - (we - v) * Ee, Pe = be - (be - A) * Ee;
- const De = Math.round(Math.min(z, ue)), _e = Math.round(Math.max(z, ue));
- let ie = X * Fe + De * 4;
- for (let se = De; se <= _e; se++)
- Ee = (z - se) / (z - ue), Ee < 0 ? Ee = 0 : Ee > 1 && (Ee = 1), D[ie++] = ae - (ae - me) * Ee | 0, D[ie++] = Q - (Q - fe) * Ee | 0, D[ie++] = ce - (ce - Pe) * Ee | 0, D[ie++] = 255;
- }
- }
- function r(k, F, x) {
- const y = F.coords, p = F.colors;
- let E, $;
- switch (F.type) {
- case "lattice":
- const M = F.verticesPerRow, m = Math.floor(y.length / M) - 1, N = M - 1;
- for (E = 0; E < m; E++) {
- let D = E * M;
- for (let X = 0; X < N; X++, D++)
- o(k, x, y[D], y[D + 1], y[D + M], p[D], p[D + 1], p[D + M]), o(k, x, y[D + M + 1], y[D + 1], y[D + M], p[D + M + 1], p[D + 1], p[D + M]);
- }
- break;
- case "triangles":
- for (E = 0, $ = y.length; E < $; E += 3)
- o(k, x, y[E], y[E + 1], y[E + 2], p[E], p[E + 1], p[E + 2]);
- break;
- default:
- throw new Error("illegal figure");
- }
- }
- class T extends _ {
- constructor(F) {
- super(), this._coords = F[2], this._colors = F[3], this._figures = F[4], this._bounds = F[5], this._bbox = F[7], this._background = F[8], this.matrix = null;
- }
- _createMeshCanvas(F, x, y) {
- const M = Math.floor(this._bounds[0]), m = Math.floor(this._bounds[1]), N = Math.ceil(this._bounds[2]) - M, D = Math.ceil(this._bounds[3]) - m, X = Math.min(Math.ceil(Math.abs(N * F[0] * 1.1)), 3e3), G = Math.min(Math.ceil(Math.abs(D * F[1] * 1.1)), 3e3), I = N / X, B = D / G, ee = {
- coords: this._coords,
- colors: this._colors,
- offsetX: -M,
- offsetY: -m,
- scaleX: 1 / I,
- scaleY: 1 / B
- }, Y = X + 2 * 2, q = G + 2 * 2, le = y.getCanvas("mesh", Y, q, !1), pe = le.context, we = pe.createImageData(X, G);
- if (x) {
- const R = we.data;
- for (let d = 0, g = R.length; d < g; d += 4)
- R[d] = x[0], R[d + 1] = x[1], R[d + 2] = x[2], R[d + 3] = 255;
- }
- for (const R of this._figures)
- r(we, R, ee);
- return pe.putImageData(we, 2, 2), {
- canvas: le.canvas,
- offsetX: M - 2 * I,
- offsetY: m - 2 * B,
- scaleX: I,
- scaleY: B
- };
- }
- getPattern(F, x, y, p) {
- h(F, this._bbox);
- let E;
- if (p === l.SHADING)
- E = n.Util.singularValueDecompose2dScale((0, s.getCurrentTransform)(F));
- else if (E = n.Util.singularValueDecompose2dScale(x.baseTransform), this.matrix) {
- const M = n.Util.singularValueDecompose2dScale(this.matrix);
- E = [E[0] * M[0], E[1] * M[1]];
- }
- const $ = this._createMeshCanvas(E, p === l.SHADING ? null : this._background, x.cachedCanvases);
- return p !== l.SHADING && (F.setTransform(...x.baseTransform), this.matrix && F.transform(...this.matrix)), F.translate($.offsetX, $.offsetY), F.scale($.scaleX, $.scaleY), F.createPattern($.canvas, "no-repeat");
- }
- }
- class S extends _ {
- getPattern() {
- return "hotpink";
- }
- }
- function w(k) {
- switch (k[0]) {
- case "RadialAxial":
- return new c(k);
- case "Mesh":
- return new T(k);
- case "Dummy":
- return new S();
- }
- throw new Error(`Unknown IR type: ${k[0]}`);
- }
- const C = {
- COLORED: 1,
- UNCOLORED: 2
- }, b = class b {
- constructor(F, x, y, p, E) {
- this.operatorList = F[2], this.matrix = F[3] || [1, 0, 0, 1, 0, 0], this.bbox = F[4], this.xstep = F[5], this.ystep = F[6], this.paintType = F[7], this.tilingType = F[8], this.color = x, this.ctx = y, this.canvasGraphicsFactory = p, this.baseTransform = E;
- }
- createPatternCanvas(F) {
- const x = this.operatorList, y = this.bbox, p = this.xstep, E = this.ystep, $ = this.paintType, M = this.tilingType, m = this.color, N = this.canvasGraphicsFactory;
- (0, n.info)("TilingType: " + M);
- const D = y[0], X = y[1], G = y[2], I = y[3], B = n.Util.singularValueDecompose2dScale(this.matrix), ee = n.Util.singularValueDecompose2dScale(this.baseTransform), Y = [B[0] * ee[0], B[1] * ee[1]], q = this.getSizeAndScale(p, this.ctx.canvas.width, Y[0]), le = this.getSizeAndScale(E, this.ctx.canvas.height, Y[1]), pe = F.cachedCanvases.getCanvas("pattern", q.size, le.size, !0), we = pe.context, be = N.createCanvasGraphics(we);
- be.groupLevel = F.groupLevel, this.setFillAndStrokeStyleToContext(be, $, m);
- let R = D, d = X, g = G, f = I;
- return D < 0 && (R = 0, g += Math.abs(D)), X < 0 && (d = 0, f += Math.abs(X)), we.translate(-(q.scale * R), -(le.scale * d)), be.transform(q.scale, 0, 0, le.scale, 0, 0), we.save(), this.clipBbox(be, R, d, g, f), be.baseTransform = (0, s.getCurrentTransform)(be.ctx), be.executeOperatorList(x), be.endDrawing(), {
- canvas: pe.canvas,
- scaleX: q.scale,
- scaleY: le.scale,
- offsetX: R,
- offsetY: d
- };
- }
- getSizeAndScale(F, x, y) {
- F = Math.abs(F);
- const p = Math.max(b.MAX_PATTERN_SIZE, x);
- let E = Math.ceil(F * y);
- return E >= p ? E = p : y = E / F, {
- scale: y,
- size: E
- };
- }
- clipBbox(F, x, y, p, E) {
- const $ = p - x, M = E - y;
- F.ctx.rect(x, y, $, M), F.current.updateRectMinMax((0, s.getCurrentTransform)(F.ctx), [x, y, p, E]), F.clip(), F.endPath();
- }
- setFillAndStrokeStyleToContext(F, x, y) {
- const p = F.ctx, E = F.current;
- switch (x) {
- case C.COLORED:
- const $ = this.ctx;
- p.fillStyle = $.fillStyle, p.strokeStyle = $.strokeStyle, E.fillColor = $.fillStyle, E.strokeColor = $.strokeStyle;
- break;
- case C.UNCOLORED:
- const M = n.Util.makeHexColor(y[0], y[1], y[2]);
- p.fillStyle = M, p.strokeStyle = M, E.fillColor = M, E.strokeColor = M;
- break;
- default:
- throw new n.FormatError(`Unsupported paint type: ${x}`);
- }
- }
- getPattern(F, x, y, p) {
- let E = y;
- p !== l.SHADING && (E = n.Util.transform(E, x.baseTransform), this.matrix && (E = n.Util.transform(E, this.matrix)));
- const $ = this.createPatternCanvas(x);
- let M = new DOMMatrix(E);
- M = M.translate($.offsetX, $.offsetY), M = M.scale(1 / $.scaleX, 1 / $.scaleY);
- const m = F.createPattern($.canvas, "repeat");
- return m.setTransform(M), m;
- }
- };
- nt(b, "MAX_PATTERN_SIZE", 3e3);
- let P = b;
- e.TilingPattern = P;
- },
- /* 13 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.convertBlackAndWhiteToRGBA = l, e.convertToRGBA = s, e.grayToRGBA = _;
- var n = i(1);
- function s(c) {
- switch (c.kind) {
- case n.ImageKind.GRAYSCALE_1BPP:
- return l(c);
- case n.ImageKind.RGB_24BPP:
- return h(c);
- }
- return null;
- }
- function l({
- src: c,
- srcPos: o = 0,
- dest: r,
- width: T,
- height: S,
- nonBlackColor: w = 4294967295,
- inverseDecode: C = !1
- }) {
- const P = n.FeatureTest.isLittleEndian ? 4278190080 : 255, [b, k] = C ? [w, P] : [P, w], F = T >> 3, x = T & 7, y = c.length;
- r = new Uint32Array(r.buffer);
- let p = 0;
- for (let E = 0; E < S; E++) {
- for (const M = o + F; o < M; o++) {
- const m = o < y ? c[o] : 255;
- r[p++] = m & 128 ? k : b, r[p++] = m & 64 ? k : b, r[p++] = m & 32 ? k : b, r[p++] = m & 16 ? k : b, r[p++] = m & 8 ? k : b, r[p++] = m & 4 ? k : b, r[p++] = m & 2 ? k : b, r[p++] = m & 1 ? k : b;
- }
- if (x === 0)
- continue;
- const $ = o < y ? c[o++] : 255;
- for (let M = 0; M < x; M++)
- r[p++] = $ & 1 << 7 - M ? k : b;
- }
- return {
- srcPos: o,
- destPos: p
- };
- }
- function h({
- src: c,
- srcPos: o = 0,
- dest: r,
- destPos: T = 0,
- width: S,
- height: w
- }) {
- let C = 0;
- const P = c.length >> 2, b = new Uint32Array(c.buffer, o, P);
- if (n.FeatureTest.isLittleEndian) {
- for (; C < P - 2; C += 3, T += 4) {
- const k = b[C], F = b[C + 1], x = b[C + 2];
- r[T] = k | 4278190080, r[T + 1] = k >>> 24 | F << 8 | 4278190080, r[T + 2] = F >>> 16 | x << 16 | 4278190080, r[T + 3] = x >>> 8 | 4278190080;
- }
- for (let k = C * 4, F = c.length; k < F; k += 3)
- r[T++] = c[k] | c[k + 1] << 8 | c[k + 2] << 16 | 4278190080;
- } else {
- for (; C < P - 2; C += 3, T += 4) {
- const k = b[C], F = b[C + 1], x = b[C + 2];
- r[T] = k | 255, r[T + 1] = k << 24 | F >>> 8 | 255, r[T + 2] = F << 16 | x >>> 16 | 255, r[T + 3] = x << 8 | 255;
- }
- for (let k = C * 4, F = c.length; k < F; k += 3)
- r[T++] = c[k] << 24 | c[k + 1] << 16 | c[k + 2] << 8 | 255;
- }
- return {
- srcPos: o,
- destPos: T
- };
- }
- function _(c, o) {
- if (n.FeatureTest.isLittleEndian)
- for (let r = 0, T = c.length; r < T; r++)
- o[r] = c[r] * 65793 | 4278190080;
- else
- for (let r = 0, T = c.length; r < T; r++)
- o[r] = c[r] * 16843008 | 255;
- }
- },
- /* 14 */
- /***/
- (t, e) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.GlobalWorkerOptions = void 0;
- const i = /* @__PURE__ */ Object.create(null);
- e.GlobalWorkerOptions = i, i.workerPort = null, i.workerSrc = "";
- },
- /* 15 */
- /***/
- (t, e, i) => {
- var c, Mn, r, Fn, S, kt;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.MessageHandler = void 0;
- var n = i(1);
- const s = {
- UNKNOWN: 0,
- DATA: 1,
- ERROR: 2
- }, l = {
- UNKNOWN: 0,
- CANCEL: 1,
- CANCEL_COMPLETE: 2,
- CLOSE: 3,
- ENQUEUE: 4,
- ERROR: 5,
- PULL: 6,
- PULL_COMPLETE: 7,
- START_COMPLETE: 8
- };
- function h(C) {
- switch (C instanceof Error || typeof C == "object" && C !== null || (0, n.unreachable)('wrapReason: Expected "reason" to be a (possibly cloned) Error.'), C.name) {
- case "AbortException":
- return new n.AbortException(C.message);
- case "MissingPDFException":
- return new n.MissingPDFException(C.message);
- case "PasswordException":
- return new n.PasswordException(C.message, C.code);
- case "UnexpectedResponseException":
- return new n.UnexpectedResponseException(C.message, C.status);
- case "UnknownErrorException":
- return new n.UnknownErrorException(C.message, C.details);
- default:
- return new n.UnknownErrorException(C.message, C.toString());
- }
- }
- class _ {
- constructor(P, b, k) {
- W(this, c);
- W(this, r);
- W(this, S);
- this.sourceName = P, this.targetName = b, this.comObj = k, this.callbackId = 1, this.streamId = 1, this.streamSinks = /* @__PURE__ */ Object.create(null), this.streamControllers = /* @__PURE__ */ Object.create(null), this.callbackCapabilities = /* @__PURE__ */ Object.create(null), this.actionHandler = /* @__PURE__ */ Object.create(null), this._onComObjOnMessage = (F) => {
- const x = F.data;
- if (x.targetName !== this.sourceName)
- return;
- if (x.stream) {
- K(this, r, Fn).call(this, x);
- return;
- }
- if (x.callback) {
- const p = x.callbackId, E = this.callbackCapabilities[p];
- if (!E)
- throw new Error(`Cannot resolve callback ${p}`);
- if (delete this.callbackCapabilities[p], x.callback === s.DATA)
- E.resolve(x.data);
- else if (x.callback === s.ERROR)
- E.reject(h(x.reason));
- else
- throw new Error("Unexpected callback case");
- return;
- }
- const y = this.actionHandler[x.action];
- if (!y)
- throw new Error(`Unknown action from worker: ${x.action}`);
- if (x.callbackId) {
- const p = this.sourceName, E = x.sourceName;
- new Promise(function($) {
- $(y(x.data));
- }).then(function($) {
- k.postMessage({
- sourceName: p,
- targetName: E,
- callback: s.DATA,
- callbackId: x.callbackId,
- data: $
- });
- }, function($) {
- k.postMessage({
- sourceName: p,
- targetName: E,
- callback: s.ERROR,
- callbackId: x.callbackId,
- reason: h($)
- });
- });
- return;
- }
- if (x.streamId) {
- K(this, c, Mn).call(this, x);
- return;
- }
- y(x.data);
- }, k.addEventListener("message", this._onComObjOnMessage);
- }
- on(P, b) {
- const k = this.actionHandler;
- if (k[P])
- throw new Error(`There is already an actionName called "${P}"`);
- k[P] = b;
- }
- send(P, b, k) {
- this.comObj.postMessage({
- sourceName: this.sourceName,
- targetName: this.targetName,
- action: P,
- data: b
- }, k);
- }
- sendWithPromise(P, b, k) {
- const F = this.callbackId++, x = new n.PromiseCapability();
- this.callbackCapabilities[F] = x;
- try {
- this.comObj.postMessage({
- sourceName: this.sourceName,
- targetName: this.targetName,
- action: P,
- callbackId: F,
- data: b
- }, k);
- } catch (y) {
- x.reject(y);
- }
- return x.promise;
- }
- sendWithStream(P, b, k, F) {
- const x = this.streamId++, y = this.sourceName, p = this.targetName, E = this.comObj;
- return new ReadableStream({
- start: ($) => {
- const M = new n.PromiseCapability();
- return this.streamControllers[x] = {
- controller: $,
- startCall: M,
- pullCall: null,
- cancelCall: null,
- isClosed: !1
- }, E.postMessage({
- sourceName: y,
- targetName: p,
- action: P,
- streamId: x,
- data: b,
- desiredSize: $.desiredSize
- }, F), M.promise;
- },
- pull: ($) => {
- const M = new n.PromiseCapability();
- return this.streamControllers[x].pullCall = M, E.postMessage({
- sourceName: y,
- targetName: p,
- stream: l.PULL,
- streamId: x,
- desiredSize: $.desiredSize
- }), M.promise;
- },
- cancel: ($) => {
- (0, n.assert)($ instanceof Error, "cancel must have a valid reason");
- const M = new n.PromiseCapability();
- return this.streamControllers[x].cancelCall = M, this.streamControllers[x].isClosed = !0, E.postMessage({
- sourceName: y,
- targetName: p,
- stream: l.CANCEL,
- streamId: x,
- reason: h($)
- }), M.promise;
- }
- }, k);
- }
- destroy() {
- this.comObj.removeEventListener("message", this._onComObjOnMessage);
- }
- }
- c = new WeakSet(), Mn = function(P) {
- const b = P.streamId, k = this.sourceName, F = P.sourceName, x = this.comObj, y = this, p = this.actionHandler[P.action], E = {
- enqueue($, M = 1, m) {
- if (this.isCancelled)
- return;
- const N = this.desiredSize;
- this.desiredSize -= M, N > 0 && this.desiredSize <= 0 && (this.sinkCapability = new n.PromiseCapability(), this.ready = this.sinkCapability.promise), x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.ENQUEUE,
- streamId: b,
- chunk: $
- }, m);
- },
- close() {
- this.isCancelled || (this.isCancelled = !0, x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.CLOSE,
- streamId: b
- }), delete y.streamSinks[b]);
- },
- error($) {
- (0, n.assert)($ instanceof Error, "error must have a valid reason"), !this.isCancelled && (this.isCancelled = !0, x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.ERROR,
- streamId: b,
- reason: h($)
- }));
- },
- sinkCapability: new n.PromiseCapability(),
- onPull: null,
- onCancel: null,
- isCancelled: !1,
- desiredSize: P.desiredSize,
- ready: null
- };
- E.sinkCapability.resolve(), E.ready = E.sinkCapability.promise, this.streamSinks[b] = E, new Promise(function($) {
- $(p(P.data, E));
- }).then(function() {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.START_COMPLETE,
- streamId: b,
- success: !0
- });
- }, function($) {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.START_COMPLETE,
- streamId: b,
- reason: h($)
- });
- });
- }, r = new WeakSet(), Fn = function(P) {
- const b = P.streamId, k = this.sourceName, F = P.sourceName, x = this.comObj, y = this.streamControllers[b], p = this.streamSinks[b];
- switch (P.stream) {
- case l.START_COMPLETE:
- P.success ? y.startCall.resolve() : y.startCall.reject(h(P.reason));
- break;
- case l.PULL_COMPLETE:
- P.success ? y.pullCall.resolve() : y.pullCall.reject(h(P.reason));
- break;
- case l.PULL:
- if (!p) {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.PULL_COMPLETE,
- streamId: b,
- success: !0
- });
- break;
- }
- p.desiredSize <= 0 && P.desiredSize > 0 && p.sinkCapability.resolve(), p.desiredSize = P.desiredSize, new Promise(function(E) {
- var $;
- E(($ = p.onPull) == null ? void 0 : $.call(p));
- }).then(function() {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.PULL_COMPLETE,
- streamId: b,
- success: !0
- });
- }, function(E) {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.PULL_COMPLETE,
- streamId: b,
- reason: h(E)
- });
- });
- break;
- case l.ENQUEUE:
- if ((0, n.assert)(y, "enqueue should have stream controller"), y.isClosed)
- break;
- y.controller.enqueue(P.chunk);
- break;
- case l.CLOSE:
- if ((0, n.assert)(y, "close should have stream controller"), y.isClosed)
- break;
- y.isClosed = !0, y.controller.close(), K(this, S, kt).call(this, y, b);
- break;
- case l.ERROR:
- (0, n.assert)(y, "error should have stream controller"), y.controller.error(h(P.reason)), K(this, S, kt).call(this, y, b);
- break;
- case l.CANCEL_COMPLETE:
- P.success ? y.cancelCall.resolve() : y.cancelCall.reject(h(P.reason)), K(this, S, kt).call(this, y, b);
- break;
- case l.CANCEL:
- if (!p)
- break;
- new Promise(function(E) {
- var $;
- E(($ = p.onCancel) == null ? void 0 : $.call(p, h(P.reason)));
- }).then(function() {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.CANCEL_COMPLETE,
- streamId: b,
- success: !0
- });
- }, function(E) {
- x.postMessage({
- sourceName: k,
- targetName: F,
- stream: l.CANCEL_COMPLETE,
- streamId: b,
- reason: h(E)
- });
- }), p.sinkCapability.reject(h(P.reason)), p.isCancelled = !0, delete this.streamSinks[b];
- break;
- default:
- throw new Error("Unexpected stream case");
- }
- }, S = new WeakSet(), kt = async function(P, b) {
- var k, F, x;
- await Promise.allSettled([(k = P.startCall) == null ? void 0 : k.promise, (F = P.pullCall) == null ? void 0 : F.promise, (x = P.cancelCall) == null ? void 0 : x.promise]), delete this.streamControllers[b];
- }, e.MessageHandler = _;
- },
- /* 16 */
- /***/
- (t, e, i) => {
- var l, h;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.Metadata = void 0;
- var n = i(1);
- class s {
- constructor({
- parsedData: c,
- rawData: o
- }) {
- W(this, l, void 0);
- W(this, h, void 0);
- oe(this, l, c), oe(this, h, o);
- }
- getRaw() {
- return a(this, h);
- }
- get(c) {
- return a(this, l).get(c) ?? null;
- }
- getAll() {
- return (0, n.objectFromMap)(a(this, l));
- }
- has(c) {
- return a(this, l).has(c);
- }
- }
- l = new WeakMap(), h = new WeakMap(), e.Metadata = s;
- },
- /* 17 */
- /***/
- (t, e, i) => {
- var c, o, r, T, S, w, tn;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.OptionalContentConfig = void 0;
- var n = i(1), s = i(8);
- const l = Symbol("INTERNAL");
- class h {
- constructor(b, k) {
- W(this, c, !0);
- this.name = b, this.intent = k;
- }
- get visible() {
- return a(this, c);
- }
- _setVisible(b, k) {
- b !== l && (0, n.unreachable)("Internal method `_setVisible` called."), oe(this, c, k);
- }
- }
- c = new WeakMap();
- class _ {
- constructor(b) {
- W(this, w);
- W(this, o, null);
- W(this, r, /* @__PURE__ */ new Map());
- W(this, T, null);
- W(this, S, null);
- if (this.name = null, this.creator = null, b !== null) {
- this.name = b.name, this.creator = b.creator, oe(this, S, b.order);
- for (const k of b.groups)
- a(this, r).set(k.id, new h(k.name, k.intent));
- if (b.baseState === "OFF")
- for (const k of a(this, r).values())
- k._setVisible(l, !1);
- for (const k of b.on)
- a(this, r).get(k)._setVisible(l, !0);
- for (const k of b.off)
- a(this, r).get(k)._setVisible(l, !1);
- oe(this, T, this.getHash());
- }
- }
- isVisible(b) {
- if (a(this, r).size === 0)
- return !0;
- if (!b)
- return (0, n.warn)("Optional content group not defined."), !0;
- if (b.type === "OCG")
- return a(this, r).has(b.id) ? a(this, r).get(b.id).visible : ((0, n.warn)(`Optional content group not found: ${b.id}`), !0);
- if (b.type === "OCMD") {
- if (b.expression)
- return K(this, w, tn).call(this, b.expression);
- if (!b.policy || b.policy === "AnyOn") {
- for (const k of b.ids) {
- if (!a(this, r).has(k))
- return (0, n.warn)(`Optional content group not found: ${k}`), !0;
- if (a(this, r).get(k).visible)
- return !0;
- }
- return !1;
- } else if (b.policy === "AllOn") {
- for (const k of b.ids) {
- if (!a(this, r).has(k))
- return (0, n.warn)(`Optional content group not found: ${k}`), !0;
- if (!a(this, r).get(k).visible)
- return !1;
- }
- return !0;
- } else if (b.policy === "AnyOff") {
- for (const k of b.ids) {
- if (!a(this, r).has(k))
- return (0, n.warn)(`Optional content group not found: ${k}`), !0;
- if (!a(this, r).get(k).visible)
- return !0;
- }
- return !1;
- } else if (b.policy === "AllOff") {
- for (const k of b.ids) {
- if (!a(this, r).has(k))
- return (0, n.warn)(`Optional content group not found: ${k}`), !0;
- if (a(this, r).get(k).visible)
- return !1;
- }
- return !0;
- }
- return (0, n.warn)(`Unknown optional content policy ${b.policy}.`), !0;
- }
- return (0, n.warn)(`Unknown group type ${b.type}.`), !0;
- }
- setVisibility(b, k = !0) {
- if (!a(this, r).has(b)) {
- (0, n.warn)(`Optional content group not found: ${b}`);
- return;
- }
- a(this, r).get(b)._setVisible(l, !!k), oe(this, o, null);
- }
- get hasInitialVisibility() {
- return a(this, T) === null || this.getHash() === a(this, T);
- }
- getOrder() {
- return a(this, r).size ? a(this, S) ? a(this, S).slice() : [...a(this, r).keys()] : null;
- }
- getGroups() {
- return a(this, r).size > 0 ? (0, n.objectFromMap)(a(this, r)) : null;
- }
- getGroup(b) {
- return a(this, r).get(b) || null;
- }
- getHash() {
- if (a(this, o) !== null)
- return a(this, o);
- const b = new s.MurmurHash3_64();
- for (const [k, F] of a(this, r))
- b.update(`${k}:${F.visible}`);
- return oe(this, o, b.hexdigest());
- }
- }
- o = new WeakMap(), r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakSet(), tn = function(b) {
- const k = b.length;
- if (k < 2)
- return !0;
- const F = b[0];
- for (let x = 1; x < k; x++) {
- const y = b[x];
- let p;
- if (Array.isArray(y))
- p = K(this, w, tn).call(this, y);
- else if (a(this, r).has(y))
- p = a(this, r).get(y).visible;
- else
- return (0, n.warn)(`Optional content group not found: ${y}`), !0;
- switch (F) {
- case "And":
- if (!p)
- return !1;
- break;
- case "Or":
- if (p)
- return !0;
- break;
- case "Not":
- return !p;
- default:
- return !0;
- }
- }
- return F === "And";
- }, e.OptionalContentConfig = _;
- },
- /* 18 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.PDFDataTransportStream = void 0;
- var n = i(1), s = i(6);
- class l {
- constructor({
- length: o,
- initialData: r,
- progressiveDone: T = !1,
- contentDispositionFilename: S = null,
- disableRange: w = !1,
- disableStream: C = !1
- }, P) {
- if ((0, n.assert)(P, 'PDFDataTransportStream - missing required "pdfDataRangeTransport" argument.'), this._queuedChunks = [], this._progressiveDone = T, this._contentDispositionFilename = S, (r == null ? void 0 : r.length) > 0) {
- const b = r instanceof Uint8Array && r.byteLength === r.buffer.byteLength ? r.buffer : new Uint8Array(r).buffer;
- this._queuedChunks.push(b);
- }
- this._pdfDataRangeTransport = P, this._isStreamingSupported = !C, this._isRangeSupported = !w, this._contentLength = o, this._fullRequestReader = null, this._rangeReaders = [], this._pdfDataRangeTransport.addRangeListener((b, k) => {
- this._onReceiveData({
- begin: b,
- chunk: k
- });
- }), this._pdfDataRangeTransport.addProgressListener((b, k) => {
- this._onProgress({
- loaded: b,
- total: k
- });
- }), this._pdfDataRangeTransport.addProgressiveReadListener((b) => {
- this._onReceiveData({
- chunk: b
- });
- }), this._pdfDataRangeTransport.addProgressiveDoneListener(() => {
- this._onProgressiveDone();
- }), this._pdfDataRangeTransport.transportReady();
- }
- _onReceiveData({
- begin: o,
- chunk: r
- }) {
- const T = r instanceof Uint8Array && r.byteLength === r.buffer.byteLength ? r.buffer : new Uint8Array(r).buffer;
- if (o === void 0)
- this._fullRequestReader ? this._fullRequestReader._enqueue(T) : this._queuedChunks.push(T);
- else {
- const S = this._rangeReaders.some(function(w) {
- return w._begin !== o ? !1 : (w._enqueue(T), !0);
- });
- (0, n.assert)(S, "_onReceiveData - no `PDFDataTransportStreamRangeReader` instance found.");
- }
- }
- get _progressiveDataLength() {
- var o;
- return ((o = this._fullRequestReader) == null ? void 0 : o._loaded) ?? 0;
- }
- _onProgress(o) {
- var r, T, S, w;
- o.total === void 0 ? (T = (r = this._rangeReaders[0]) == null ? void 0 : r.onProgress) == null || T.call(r, {
- loaded: o.loaded
- }) : (w = (S = this._fullRequestReader) == null ? void 0 : S.onProgress) == null || w.call(S, {
- loaded: o.loaded,
- total: o.total
- });
- }
- _onProgressiveDone() {
- var o;
- (o = this._fullRequestReader) == null || o.progressiveDone(), this._progressiveDone = !0;
- }
- _removeRangeReader(o) {
- const r = this._rangeReaders.indexOf(o);
- r >= 0 && this._rangeReaders.splice(r, 1);
- }
- getFullReader() {
- (0, n.assert)(!this._fullRequestReader, "PDFDataTransportStream.getFullReader can only be called once.");
- const o = this._queuedChunks;
- return this._queuedChunks = null, new h(this, o, this._progressiveDone, this._contentDispositionFilename);
- }
- getRangeReader(o, r) {
- if (r <= this._progressiveDataLength)
- return null;
- const T = new _(this, o, r);
- return this._pdfDataRangeTransport.requestDataRange(o, r), this._rangeReaders.push(T), T;
- }
- cancelAllRequests(o) {
- var r;
- (r = this._fullRequestReader) == null || r.cancel(o);
- for (const T of this._rangeReaders.slice(0))
- T.cancel(o);
- this._pdfDataRangeTransport.abort();
- }
- }
- e.PDFDataTransportStream = l;
- class h {
- constructor(o, r, T = !1, S = null) {
- this._stream = o, this._done = T || !1, this._filename = (0, s.isPdfFile)(S) ? S : null, this._queuedChunks = r || [], this._loaded = 0;
- for (const w of this._queuedChunks)
- this._loaded += w.byteLength;
- this._requests = [], this._headersReady = Promise.resolve(), o._fullRequestReader = this, this.onProgress = null;
- }
- _enqueue(o) {
- this._done || (this._requests.length > 0 ? this._requests.shift().resolve({
- value: o,
- done: !1
- }) : this._queuedChunks.push(o), this._loaded += o.byteLength);
- }
- get headersReady() {
- return this._headersReady;
- }
- get filename() {
- return this._filename;
- }
- get isRangeSupported() {
- return this._stream._isRangeSupported;
- }
- get isStreamingSupported() {
- return this._stream._isStreamingSupported;
- }
- get contentLength() {
- return this._stream._contentLength;
- }
- async read() {
- if (this._queuedChunks.length > 0)
- return {
- value: this._queuedChunks.shift(),
- done: !1
- };
- if (this._done)
- return {
- value: void 0,
- done: !0
- };
- const o = new n.PromiseCapability();
- return this._requests.push(o), o.promise;
- }
- cancel(o) {
- this._done = !0;
- for (const r of this._requests)
- r.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0;
- }
- progressiveDone() {
- this._done || (this._done = !0);
- }
- }
- class _ {
- constructor(o, r, T) {
- this._stream = o, this._begin = r, this._end = T, this._queuedChunk = null, this._requests = [], this._done = !1, this.onProgress = null;
- }
- _enqueue(o) {
- if (!this._done) {
- if (this._requests.length === 0)
- this._queuedChunk = o;
- else {
- this._requests.shift().resolve({
- value: o,
- done: !1
- });
- for (const T of this._requests)
- T.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0;
- }
- this._done = !0, this._stream._removeRangeReader(this);
- }
- }
- get isStreamingSupported() {
- return !1;
- }
- async read() {
- if (this._queuedChunk) {
- const r = this._queuedChunk;
- return this._queuedChunk = null, {
- value: r,
- done: !1
- };
- }
- if (this._done)
- return {
- value: void 0,
- done: !0
- };
- const o = new n.PromiseCapability();
- return this._requests.push(o), o.promise;
- }
- cancel(o) {
- this._done = !0;
- for (const r of this._requests)
- r.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0, this._stream._removeRangeReader(this);
- }
- }
- },
- /* 19 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.PDFFetchStream = void 0;
- var n = i(1), s = i(20);
- function l(T, S, w) {
- return {
- method: "GET",
- headers: T,
- signal: w.signal,
- mode: "cors",
- credentials: S ? "include" : "same-origin",
- redirect: "follow"
- };
- }
- function h(T) {
- const S = new Headers();
- for (const w in T) {
- const C = T[w];
- C !== void 0 && S.append(w, C);
- }
- return S;
- }
- function _(T) {
- return T instanceof Uint8Array ? T.buffer : T instanceof ArrayBuffer ? T : ((0, n.warn)(`getArrayBuffer - unexpected data format: ${T}`), new Uint8Array(T).buffer);
- }
- class c {
- constructor(S) {
- this.source = S, this.isHttp = /^https?:/i.test(S.url), this.httpHeaders = this.isHttp && S.httpHeaders || {}, this._fullRequestReader = null, this._rangeRequestReaders = [];
- }
- get _progressiveDataLength() {
- var S;
- return ((S = this._fullRequestReader) == null ? void 0 : S._loaded) ?? 0;
- }
- getFullReader() {
- return (0, n.assert)(!this._fullRequestReader, "PDFFetchStream.getFullReader can only be called once."), this._fullRequestReader = new o(this), this._fullRequestReader;
- }
- getRangeReader(S, w) {
- if (w <= this._progressiveDataLength)
- return null;
- const C = new r(this, S, w);
- return this._rangeRequestReaders.push(C), C;
- }
- cancelAllRequests(S) {
- var w;
- (w = this._fullRequestReader) == null || w.cancel(S);
- for (const C of this._rangeRequestReaders.slice(0))
- C.cancel(S);
- }
- }
- e.PDFFetchStream = c;
- class o {
- constructor(S) {
- this._stream = S, this._reader = null, this._loaded = 0, this._filename = null;
- const w = S.source;
- this._withCredentials = w.withCredentials || !1, this._contentLength = w.length, this._headersCapability = new n.PromiseCapability(), this._disableRange = w.disableRange || !1, this._rangeChunkSize = w.rangeChunkSize, !this._rangeChunkSize && !this._disableRange && (this._disableRange = !0), this._abortController = new AbortController(), this._isStreamingSupported = !w.disableStream, this._isRangeSupported = !w.disableRange, this._headers = h(this._stream.httpHeaders);
- const C = w.url;
- fetch(C, l(this._headers, this._withCredentials, this._abortController)).then((P) => {
- if (!(0, s.validateResponseStatus)(P.status))
- throw (0, s.createResponseStatusError)(P.status, C);
- this._reader = P.body.getReader(), this._headersCapability.resolve();
- const b = (x) => P.headers.get(x), {
- allowRangeRequests: k,
- suggestedLength: F
- } = (0, s.validateRangeRequestCapabilities)({
- getResponseHeader: b,
- isHttp: this._stream.isHttp,
- rangeChunkSize: this._rangeChunkSize,
- disableRange: this._disableRange
- });
- this._isRangeSupported = k, this._contentLength = F || this._contentLength, this._filename = (0, s.extractFilenameFromHeader)(b), !this._isStreamingSupported && this._isRangeSupported && this.cancel(new n.AbortException("Streaming is disabled."));
- }).catch(this._headersCapability.reject), this.onProgress = null;
- }
- get headersReady() {
- return this._headersCapability.promise;
- }
- get filename() {
- return this._filename;
- }
- get contentLength() {
- return this._contentLength;
- }
- get isRangeSupported() {
- return this._isRangeSupported;
- }
- get isStreamingSupported() {
- return this._isStreamingSupported;
- }
- async read() {
- var C;
- await this._headersCapability.promise;
- const {
- value: S,
- done: w
- } = await this._reader.read();
- return w ? {
- value: S,
- done: w
- } : (this._loaded += S.byteLength, (C = this.onProgress) == null || C.call(this, {
- loaded: this._loaded,
- total: this._contentLength
- }), {
- value: _(S),
- done: !1
- });
- }
- cancel(S) {
- var w;
- (w = this._reader) == null || w.cancel(S), this._abortController.abort();
- }
- }
- class r {
- constructor(S, w, C) {
- this._stream = S, this._reader = null, this._loaded = 0;
- const P = S.source;
- this._withCredentials = P.withCredentials || !1, this._readCapability = new n.PromiseCapability(), this._isStreamingSupported = !P.disableStream, this._abortController = new AbortController(), this._headers = h(this._stream.httpHeaders), this._headers.append("Range", `bytes=${w}-${C - 1}`);
- const b = P.url;
- fetch(b, l(this._headers, this._withCredentials, this._abortController)).then((k) => {
- if (!(0, s.validateResponseStatus)(k.status))
- throw (0, s.createResponseStatusError)(k.status, b);
- this._readCapability.resolve(), this._reader = k.body.getReader();
- }).catch(this._readCapability.reject), this.onProgress = null;
- }
- get isStreamingSupported() {
- return this._isStreamingSupported;
- }
- async read() {
- var C;
- await this._readCapability.promise;
- const {
- value: S,
- done: w
- } = await this._reader.read();
- return w ? {
- value: S,
- done: w
- } : (this._loaded += S.byteLength, (C = this.onProgress) == null || C.call(this, {
- loaded: this._loaded
- }), {
- value: _(S),
- done: !1
- });
- }
- cancel(S) {
- var w;
- (w = this._reader) == null || w.cancel(S), this._abortController.abort();
- }
- }
- },
- /* 20 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.createResponseStatusError = c, e.extractFilenameFromHeader = _, e.validateRangeRequestCapabilities = h, e.validateResponseStatus = o;
- var n = i(1), s = i(21), l = i(6);
- function h({
- getResponseHeader: r,
- isHttp: T,
- rangeChunkSize: S,
- disableRange: w
- }) {
- const C = {
- allowRangeRequests: !1,
- suggestedLength: void 0
- }, P = parseInt(r("Content-Length"), 10);
- return !Number.isInteger(P) || (C.suggestedLength = P, P <= 2 * S) || w || !T || r("Accept-Ranges") !== "bytes" || (r("Content-Encoding") || "identity") !== "identity" || (C.allowRangeRequests = !0), C;
- }
- function _(r) {
- const T = r("Content-Disposition");
- if (T) {
- let S = (0, s.getFilenameFromContentDispositionHeader)(T);
- if (S.includes("%"))
- try {
- S = decodeURIComponent(S);
- } catch {
- }
- if ((0, l.isPdfFile)(S))
- return S;
- }
- return null;
- }
- function c(r, T) {
- return r === 404 || r === 0 && T.startsWith("file:") ? new n.MissingPDFException('Missing PDF "' + T + '".') : new n.UnexpectedResponseException(`Unexpected server response (${r}) while retrieving PDF "${T}".`, r);
- }
- function o(r) {
- return r === 200 || r === 206;
- }
- },
- /* 21 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.getFilenameFromContentDispositionHeader = s;
- var n = i(1);
- function s(l) {
- let h = !0, _ = c("filename\\*", "i").exec(l);
- if (_) {
- _ = _[1];
- let P = S(_);
- return P = unescape(P), P = w(P), P = C(P), r(P);
- }
- if (_ = T(l), _) {
- const P = C(_);
- return r(P);
- }
- if (_ = c("filename", "i").exec(l), _) {
- _ = _[1];
- let P = S(_);
- return P = C(P), r(P);
- }
- function c(P, b) {
- return new RegExp("(?:^|;)\\s*" + P + '\\s*=\\s*([^";\\s][^;\\s]*|"(?:[^"\\\\]|\\\\"?)+"?)', b);
- }
- function o(P, b) {
- if (P) {
- if (!/^[\x00-\xFF]+$/.test(b))
- return b;
- try {
- const k = new TextDecoder(P, {
- fatal: !0
- }), F = (0, n.stringToBytes)(b);
- b = k.decode(F), h = !1;
- } catch {
- }
- }
- return b;
- }
- function r(P) {
- return h && /[\x80-\xff]/.test(P) && (P = o("utf-8", P), h && (P = o("iso-8859-1", P))), P;
- }
- function T(P) {
- const b = [];
- let k;
- const F = c("filename\\*((?!0\\d)\\d+)(\\*?)", "ig");
- for (; (k = F.exec(P)) !== null; ) {
- let [, y, p, E] = k;
- if (y = parseInt(y, 10), y in b) {
- if (y === 0)
- break;
- continue;
- }
- b[y] = [p, E];
- }
- const x = [];
- for (let y = 0; y < b.length && y in b; ++y) {
- let [p, E] = b[y];
- E = S(E), p && (E = unescape(E), y === 0 && (E = w(E))), x.push(E);
- }
- return x.join("");
- }
- function S(P) {
- if (P.startsWith('"')) {
- const b = P.slice(1).split('\\"');
- for (let k = 0; k < b.length; ++k) {
- const F = b[k].indexOf('"');
- F !== -1 && (b[k] = b[k].slice(0, F), b.length = k + 1), b[k] = b[k].replaceAll(/\\(.)/g, "$1");
- }
- P = b.join('"');
- }
- return P;
- }
- function w(P) {
- const b = P.indexOf("'");
- if (b === -1)
- return P;
- const k = P.slice(0, b), x = P.slice(b + 1).replace(/^[^']*'/, "");
- return o(k, x);
- }
- function C(P) {
- return !P.startsWith("=?") || /[\x00-\x19\x80-\xff]/.test(P) ? P : P.replaceAll(/=\?([\w-]*)\?([QqBb])\?((?:[^?]|\?(?!=))*)\?=/g, function(b, k, F, x) {
- if (F === "q" || F === "Q")
- return x = x.replaceAll("_", " "), x = x.replaceAll(/=([0-9a-fA-F]{2})/g, function(y, p) {
- return String.fromCharCode(parseInt(p, 16));
- }), o(k, x);
- try {
- x = atob(x);
- } catch {
- }
- return o(k, x);
- });
- }
- return "";
- }
- },
- /* 22 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.PDFNetworkStream = void 0;
- var n = i(1), s = i(20);
- const l = 200, h = 206;
- function _(S) {
- const w = S.response;
- return typeof w != "string" ? w : (0, n.stringToBytes)(w).buffer;
- }
- class c {
- constructor(w, C = {}) {
- this.url = w, this.isHttp = /^https?:/i.test(w), this.httpHeaders = this.isHttp && C.httpHeaders || /* @__PURE__ */ Object.create(null), this.withCredentials = C.withCredentials || !1, this.currXhrId = 0, this.pendingRequests = /* @__PURE__ */ Object.create(null);
- }
- requestRange(w, C, P) {
- const b = {
- begin: w,
- end: C
- };
- for (const k in P)
- b[k] = P[k];
- return this.request(b);
- }
- requestFull(w) {
- return this.request(w);
- }
- request(w) {
- const C = new XMLHttpRequest(), P = this.currXhrId++, b = this.pendingRequests[P] = {
- xhr: C
- };
- C.open("GET", this.url), C.withCredentials = this.withCredentials;
- for (const k in this.httpHeaders) {
- const F = this.httpHeaders[k];
- F !== void 0 && C.setRequestHeader(k, F);
- }
- return this.isHttp && "begin" in w && "end" in w ? (C.setRequestHeader("Range", `bytes=${w.begin}-${w.end - 1}`), b.expectedStatus = h) : b.expectedStatus = l, C.responseType = "arraybuffer", w.onError && (C.onerror = function(k) {
- w.onError(C.status);
- }), C.onreadystatechange = this.onStateChange.bind(this, P), C.onprogress = this.onProgress.bind(this, P), b.onHeadersReceived = w.onHeadersReceived, b.onDone = w.onDone, b.onError = w.onError, b.onProgress = w.onProgress, C.send(null), P;
- }
- onProgress(w, C) {
- var b;
- const P = this.pendingRequests[w];
- P && ((b = P.onProgress) == null || b.call(P, C));
- }
- onStateChange(w, C) {
- var y, p, E;
- const P = this.pendingRequests[w];
- if (!P)
- return;
- const b = P.xhr;
- if (b.readyState >= 2 && P.onHeadersReceived && (P.onHeadersReceived(), delete P.onHeadersReceived), b.readyState !== 4 || !(w in this.pendingRequests))
- return;
- if (delete this.pendingRequests[w], b.status === 0 && this.isHttp) {
- (y = P.onError) == null || y.call(P, b.status);
- return;
- }
- const k = b.status || l;
- if (!(k === l && P.expectedStatus === h) && k !== P.expectedStatus) {
- (p = P.onError) == null || p.call(P, b.status);
- return;
- }
- const x = _(b);
- if (k === h) {
- const $ = b.getResponseHeader("Content-Range"), M = /bytes (\d+)-(\d+)\/(\d+)/.exec($);
- P.onDone({
- begin: parseInt(M[1], 10),
- chunk: x
- });
- } else
- x ? P.onDone({
- begin: 0,
- chunk: x
- }) : (E = P.onError) == null || E.call(P, b.status);
- }
- getRequestXhr(w) {
- return this.pendingRequests[w].xhr;
- }
- isPendingRequest(w) {
- return w in this.pendingRequests;
- }
- abortRequest(w) {
- const C = this.pendingRequests[w].xhr;
- delete this.pendingRequests[w], C.abort();
- }
- }
- class o {
- constructor(w) {
- this._source = w, this._manager = new c(w.url, {
- httpHeaders: w.httpHeaders,
- withCredentials: w.withCredentials
- }), this._rangeChunkSize = w.rangeChunkSize, this._fullRequestReader = null, this._rangeRequestReaders = [];
- }
- _onRangeRequestReaderClosed(w) {
- const C = this._rangeRequestReaders.indexOf(w);
- C >= 0 && this._rangeRequestReaders.splice(C, 1);
- }
- getFullReader() {
- return (0, n.assert)(!this._fullRequestReader, "PDFNetworkStream.getFullReader can only be called once."), this._fullRequestReader = new r(this._manager, this._source), this._fullRequestReader;
- }
- getRangeReader(w, C) {
- const P = new T(this._manager, w, C);
- return P.onClosed = this._onRangeRequestReaderClosed.bind(this), this._rangeRequestReaders.push(P), P;
- }
- cancelAllRequests(w) {
- var C;
- (C = this._fullRequestReader) == null || C.cancel(w);
- for (const P of this._rangeRequestReaders.slice(0))
- P.cancel(w);
- }
- }
- e.PDFNetworkStream = o;
- class r {
- constructor(w, C) {
- this._manager = w;
- const P = {
- onHeadersReceived: this._onHeadersReceived.bind(this),
- onDone: this._onDone.bind(this),
- onError: this._onError.bind(this),
- onProgress: this._onProgress.bind(this)
- };
- this._url = C.url, this._fullRequestId = w.requestFull(P), this._headersReceivedCapability = new n.PromiseCapability(), this._disableRange = C.disableRange || !1, this._contentLength = C.length, this._rangeChunkSize = C.rangeChunkSize, !this._rangeChunkSize && !this._disableRange && (this._disableRange = !0), this._isStreamingSupported = !1, this._isRangeSupported = !1, this._cachedChunks = [], this._requests = [], this._done = !1, this._storedError = void 0, this._filename = null, this.onProgress = null;
- }
- _onHeadersReceived() {
- const w = this._fullRequestId, C = this._manager.getRequestXhr(w), P = (F) => C.getResponseHeader(F), {
- allowRangeRequests: b,
- suggestedLength: k
- } = (0, s.validateRangeRequestCapabilities)({
- getResponseHeader: P,
- isHttp: this._manager.isHttp,
- rangeChunkSize: this._rangeChunkSize,
- disableRange: this._disableRange
- });
- b && (this._isRangeSupported = !0), this._contentLength = k || this._contentLength, this._filename = (0, s.extractFilenameFromHeader)(P), this._isRangeSupported && this._manager.abortRequest(w), this._headersReceivedCapability.resolve();
- }
- _onDone(w) {
- if (w && (this._requests.length > 0 ? this._requests.shift().resolve({
- value: w.chunk,
- done: !1
- }) : this._cachedChunks.push(w.chunk)), this._done = !0, !(this._cachedChunks.length > 0)) {
- for (const C of this._requests)
- C.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0;
- }
- }
- _onError(w) {
- this._storedError = (0, s.createResponseStatusError)(w, this._url), this._headersReceivedCapability.reject(this._storedError);
- for (const C of this._requests)
- C.reject(this._storedError);
- this._requests.length = 0, this._cachedChunks.length = 0;
- }
- _onProgress(w) {
- var C;
- (C = this.onProgress) == null || C.call(this, {
- loaded: w.loaded,
- total: w.lengthComputable ? w.total : this._contentLength
- });
- }
- get filename() {
- return this._filename;
- }
- get isRangeSupported() {
- return this._isRangeSupported;
- }
- get isStreamingSupported() {
- return this._isStreamingSupported;
- }
- get contentLength() {
- return this._contentLength;
- }
- get headersReady() {
- return this._headersReceivedCapability.promise;
- }
- async read() {
- if (this._storedError)
- throw this._storedError;
- if (this._cachedChunks.length > 0)
- return {
- value: this._cachedChunks.shift(),
- done: !1
- };
- if (this._done)
- return {
- value: void 0,
- done: !0
- };
- const w = new n.PromiseCapability();
- return this._requests.push(w), w.promise;
- }
- cancel(w) {
- this._done = !0, this._headersReceivedCapability.reject(w);
- for (const C of this._requests)
- C.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0, this._manager.isPendingRequest(this._fullRequestId) && this._manager.abortRequest(this._fullRequestId), this._fullRequestReader = null;
- }
- }
- class T {
- constructor(w, C, P) {
- this._manager = w;
- const b = {
- onDone: this._onDone.bind(this),
- onError: this._onError.bind(this),
- onProgress: this._onProgress.bind(this)
- };
- this._url = w.url, this._requestId = w.requestRange(C, P, b), this._requests = [], this._queuedChunk = null, this._done = !1, this._storedError = void 0, this.onProgress = null, this.onClosed = null;
- }
- _close() {
- var w;
- (w = this.onClosed) == null || w.call(this, this);
- }
- _onDone(w) {
- const C = w.chunk;
- this._requests.length > 0 ? this._requests.shift().resolve({
- value: C,
- done: !1
- }) : this._queuedChunk = C, this._done = !0;
- for (const P of this._requests)
- P.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0, this._close();
- }
- _onError(w) {
- this._storedError = (0, s.createResponseStatusError)(w, this._url);
- for (const C of this._requests)
- C.reject(this._storedError);
- this._requests.length = 0, this._queuedChunk = null;
- }
- _onProgress(w) {
- var C;
- this.isStreamingSupported || (C = this.onProgress) == null || C.call(this, {
- loaded: w.loaded
- });
- }
- get isStreamingSupported() {
- return !1;
- }
- async read() {
- if (this._storedError)
- throw this._storedError;
- if (this._queuedChunk !== null) {
- const C = this._queuedChunk;
- return this._queuedChunk = null, {
- value: C,
- done: !1
- };
- }
- if (this._done)
- return {
- value: void 0,
- done: !0
- };
- const w = new n.PromiseCapability();
- return this._requests.push(w), w.promise;
- }
- cancel(w) {
- this._done = !0;
- for (const C of this._requests)
- C.resolve({
- value: void 0,
- done: !0
- });
- this._requests.length = 0, this._manager.isPendingRequest(this._requestId) && this._manager.abortRequest(this._requestId), this._close();
- }
- }
- },
- /* 23 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.PDFNodeStream = void 0;
- var n = i(1), s = i(20);
- const l = /^file:\/\/\/[a-zA-Z]:\//;
- function h(P) {
- const b = require$$5, k = b.parse(P);
- return k.protocol === "file:" || k.host ? k : /^[a-z]:[/\\]/i.test(P) ? b.parse(`file:///${P}`) : (k.host || (k.protocol = "file:"), k);
- }
- class _ {
- constructor(b) {
- this.source = b, this.url = h(b.url), this.isHttp = this.url.protocol === "http:" || this.url.protocol === "https:", this.isFsUrl = this.url.protocol === "file:", this.httpHeaders = this.isHttp && b.httpHeaders || {}, this._fullRequestReader = null, this._rangeRequestReaders = [];
- }
- get _progressiveDataLength() {
- var b;
- return ((b = this._fullRequestReader) == null ? void 0 : b._loaded) ?? 0;
- }
- getFullReader() {
- return (0, n.assert)(!this._fullRequestReader, "PDFNodeStream.getFullReader can only be called once."), this._fullRequestReader = this.isFsUrl ? new w(this) : new T(this), this._fullRequestReader;
- }
- getRangeReader(b, k) {
- if (k <= this._progressiveDataLength)
- return null;
- const F = this.isFsUrl ? new C(this, b, k) : new S(this, b, k);
- return this._rangeRequestReaders.push(F), F;
- }
- cancelAllRequests(b) {
- var k;
- (k = this._fullRequestReader) == null || k.cancel(b);
- for (const F of this._rangeRequestReaders.slice(0))
- F.cancel(b);
- }
- }
- e.PDFNodeStream = _;
- class c {
- constructor(b) {
- this._url = b.url, this._done = !1, this._storedError = null, this.onProgress = null;
- const k = b.source;
- this._contentLength = k.length, this._loaded = 0, this._filename = null, this._disableRange = k.disableRange || !1, this._rangeChunkSize = k.rangeChunkSize, !this._rangeChunkSize && !this._disableRange && (this._disableRange = !0), this._isStreamingSupported = !k.disableStream, this._isRangeSupported = !k.disableRange, this._readableStream = null, this._readCapability = new n.PromiseCapability(), this._headersCapability = new n.PromiseCapability();
- }
- get headersReady() {
- return this._headersCapability.promise;
- }
- get filename() {
- return this._filename;
- }
- get contentLength() {
- return this._contentLength;
- }
- get isRangeSupported() {
- return this._isRangeSupported;
- }
- get isStreamingSupported() {
- return this._isStreamingSupported;
- }
- async read() {
- var F;
- if (await this._readCapability.promise, this._done)
- return {
- value: void 0,
- done: !0
- };
- if (this._storedError)
- throw this._storedError;
- const b = this._readableStream.read();
- return b === null ? (this._readCapability = new n.PromiseCapability(), this.read()) : (this._loaded += b.length, (F = this.onProgress) == null || F.call(this, {
- loaded: this._loaded,
- total: this._contentLength
- }), {
- value: new Uint8Array(b).buffer,
- done: !1
- });
- }
- cancel(b) {
- if (!this._readableStream) {
- this._error(b);
- return;
- }
- this._readableStream.destroy(b);
- }
- _error(b) {
- this._storedError = b, this._readCapability.resolve();
- }
- _setReadableStream(b) {
- this._readableStream = b, b.on("readable", () => {
- this._readCapability.resolve();
- }), b.on("end", () => {
- b.destroy(), this._done = !0, this._readCapability.resolve();
- }), b.on("error", (k) => {
- this._error(k);
- }), !this._isStreamingSupported && this._isRangeSupported && this._error(new n.AbortException("streaming is disabled")), this._storedError && this._readableStream.destroy(this._storedError);
- }
- }
- class o {
- constructor(b) {
- this._url = b.url, this._done = !1, this._storedError = null, this.onProgress = null, this._loaded = 0, this._readableStream = null, this._readCapability = new n.PromiseCapability();
- const k = b.source;
- this._isStreamingSupported = !k.disableStream;
- }
- get isStreamingSupported() {
- return this._isStreamingSupported;
- }
- async read() {
- var F;
- if (await this._readCapability.promise, this._done)
- return {
- value: void 0,
- done: !0
- };
- if (this._storedError)
- throw this._storedError;
- const b = this._readableStream.read();
- return b === null ? (this._readCapability = new n.PromiseCapability(), this.read()) : (this._loaded += b.length, (F = this.onProgress) == null || F.call(this, {
- loaded: this._loaded
- }), {
- value: new Uint8Array(b).buffer,
- done: !1
- });
- }
- cancel(b) {
- if (!this._readableStream) {
- this._error(b);
- return;
- }
- this._readableStream.destroy(b);
- }
- _error(b) {
- this._storedError = b, this._readCapability.resolve();
- }
- _setReadableStream(b) {
- this._readableStream = b, b.on("readable", () => {
- this._readCapability.resolve();
- }), b.on("end", () => {
- b.destroy(), this._done = !0, this._readCapability.resolve();
- }), b.on("error", (k) => {
- this._error(k);
- }), this._storedError && this._readableStream.destroy(this._storedError);
- }
- }
- function r(P, b) {
- return {
- protocol: P.protocol,
- auth: P.auth,
- host: P.hostname,
- port: P.port,
- path: P.path,
- method: "GET",
- headers: b
- };
- }
- class T extends c {
- constructor(b) {
- super(b);
- const k = (F) => {
- if (F.statusCode === 404) {
- const E = new n.MissingPDFException(`Missing PDF "${this._url}".`);
- this._storedError = E, this._headersCapability.reject(E);
- return;
- }
- this._headersCapability.resolve(), this._setReadableStream(F);
- const x = (E) => this._readableStream.headers[E.toLowerCase()], {
- allowRangeRequests: y,
- suggestedLength: p
- } = (0, s.validateRangeRequestCapabilities)({
- getResponseHeader: x,
- isHttp: b.isHttp,
- rangeChunkSize: this._rangeChunkSize,
- disableRange: this._disableRange
- });
- this._isRangeSupported = y, this._contentLength = p || this._contentLength, this._filename = (0, s.extractFilenameFromHeader)(x);
- };
- if (this._request = null, this._url.protocol === "http:") {
- const F = require$$5;
- this._request = F.request(r(this._url, b.httpHeaders), k);
- } else {
- const F = require$$5;
- this._request = F.request(r(this._url, b.httpHeaders), k);
- }
- this._request.on("error", (F) => {
- this._storedError = F, this._headersCapability.reject(F);
- }), this._request.end();
- }
- }
- class S extends o {
- constructor(b, k, F) {
- super(b), this._httpHeaders = {};
- for (const y in b.httpHeaders) {
- const p = b.httpHeaders[y];
- p !== void 0 && (this._httpHeaders[y] = p);
- }
- this._httpHeaders.Range = `bytes=${k}-${F - 1}`;
- const x = (y) => {
- if (y.statusCode === 404) {
- const p = new n.MissingPDFException(`Missing PDF "${this._url}".`);
- this._storedError = p;
- return;
- }
- this._setReadableStream(y);
- };
- if (this._request = null, this._url.protocol === "http:") {
- const y = require$$5;
- this._request = y.request(r(this._url, this._httpHeaders), x);
- } else {
- const y = require$$5;
- this._request = y.request(r(this._url, this._httpHeaders), x);
- }
- this._request.on("error", (y) => {
- this._storedError = y;
- }), this._request.end();
- }
- }
- class w extends c {
- constructor(b) {
- super(b);
- let k = decodeURIComponent(this._url.path);
- l.test(this._url.href) && (k = k.replace(/^\//, ""));
- const F = require$$5;
- F.lstat(k, (x, y) => {
- if (x) {
- x.code === "ENOENT" && (x = new n.MissingPDFException(`Missing PDF "${k}".`)), this._storedError = x, this._headersCapability.reject(x);
- return;
- }
- this._contentLength = y.size, this._setReadableStream(F.createReadStream(k)), this._headersCapability.resolve();
- });
- }
- }
- class C extends o {
- constructor(b, k, F) {
- super(b);
- let x = decodeURIComponent(this._url.path);
- l.test(this._url.href) && (x = x.replace(/^\//, ""));
- const y = require$$5;
- this._setReadableStream(y.createReadStream(x, {
- start: k,
- end: F - 1
- }));
- }
- }
- },
- /* 24 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.SVGGraphics = void 0;
- var n = i(6), s = i(1);
- const l = {
- fontStyle: "normal",
- fontWeight: "normal",
- fillColor: "#000000"
- }, h = "http://www.w3.org/XML/1998/namespace", _ = "http://www.w3.org/1999/xlink", c = ["butt", "round", "square"], o = ["miter", "round", "bevel"], r = function(y, p = "", E = !1) {
- if (URL.createObjectURL && typeof Blob < "u" && !E)
- return URL.createObjectURL(new Blob([y], {
- type: p
- }));
- const $ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
- let M = `data:${p};base64,`;
- for (let m = 0, N = y.length; m < N; m += 3) {
- const D = y[m] & 255, X = y[m + 1] & 255, G = y[m + 2] & 255, I = D >> 2, B = (D & 3) << 4 | X >> 4, ee = m + 1 < N ? (X & 15) << 2 | G >> 6 : 64, Y = m + 2 < N ? G & 63 : 64;
- M += $[I] + $[B] + $[ee] + $[Y];
- }
- return M;
- }, T = function() {
- const y = new Uint8Array([137, 80, 78, 71, 13, 10, 26, 10]), p = 12, E = new Int32Array(256);
- for (let G = 0; G < 256; G++) {
- let I = G;
- for (let B = 0; B < 8; B++)
- I = I & 1 ? 3988292384 ^ I >> 1 & 2147483647 : I >> 1 & 2147483647;
- E[G] = I;
- }
- function $(G, I, B) {
- let ee = -1;
- for (let Y = I; Y < B; Y++) {
- const q = (ee ^ G[Y]) & 255, le = E[q];
- ee = ee >>> 8 ^ le;
- }
- return ee ^ -1;
- }
- function M(G, I, B, ee) {
- let Y = ee;
- const q = I.length;
- B[Y] = q >> 24 & 255, B[Y + 1] = q >> 16 & 255, B[Y + 2] = q >> 8 & 255, B[Y + 3] = q & 255, Y += 4, B[Y] = G.charCodeAt(0) & 255, B[Y + 1] = G.charCodeAt(1) & 255, B[Y + 2] = G.charCodeAt(2) & 255, B[Y + 3] = G.charCodeAt(3) & 255, Y += 4, B.set(I, Y), Y += I.length;
- const le = $(B, ee + 4, Y);
- B[Y] = le >> 24 & 255, B[Y + 1] = le >> 16 & 255, B[Y + 2] = le >> 8 & 255, B[Y + 3] = le & 255;
- }
- function m(G, I, B) {
- let ee = 1, Y = 0;
- for (let q = I; q < B; ++q)
- ee = (ee + (G[q] & 255)) % 65521, Y = (Y + ee) % 65521;
- return Y << 16 | ee;
- }
- function N(G) {
- if (!s.isNodeJS)
- return D(G);
- try {
- const I = parseInt(process.versions.node) >= 8 ? G : Buffer.from(G), B = require$$5.deflateSync(I, {
- level: 9
- });
- return B instanceof Uint8Array ? B : new Uint8Array(B);
- } catch (I) {
- (0, s.warn)("Not compressing PNG because zlib.deflateSync is unavailable: " + I);
- }
- return D(G);
- }
- function D(G) {
- let I = G.length;
- const B = 65535, ee = Math.ceil(I / B), Y = new Uint8Array(2 + I + ee * 5 + 4);
- let q = 0;
- Y[q++] = 120, Y[q++] = 156;
- let le = 0;
- for (; I > B; )
- Y[q++] = 0, Y[q++] = 255, Y[q++] = 255, Y[q++] = 0, Y[q++] = 0, Y.set(G.subarray(le, le + B), q), q += B, le += B, I -= B;
- Y[q++] = 1, Y[q++] = I & 255, Y[q++] = I >> 8 & 255, Y[q++] = ~I & 65535 & 255, Y[q++] = (~I & 65535) >> 8 & 255, Y.set(G.subarray(le), q), q += G.length - le;
- const pe = m(G, 0, G.length);
- return Y[q++] = pe >> 24 & 255, Y[q++] = pe >> 16 & 255, Y[q++] = pe >> 8 & 255, Y[q++] = pe & 255, Y;
- }
- function X(G, I, B, ee) {
- const Y = G.width, q = G.height;
- let le, pe, we;
- const be = G.data;
- switch (I) {
- case s.ImageKind.GRAYSCALE_1BPP:
- pe = 0, le = 1, we = Y + 7 >> 3;
- break;
- case s.ImageKind.RGB_24BPP:
- pe = 2, le = 8, we = Y * 3;
- break;
- case s.ImageKind.RGBA_32BPP:
- pe = 6, le = 8, we = Y * 4;
- break;
- default:
- throw new Error("invalid format");
- }
- const R = new Uint8Array((1 + we) * q);
- let d = 0, g = 0;
- for (let z = 0; z < q; ++z)
- R[d++] = 0, R.set(be.subarray(g, g + we), d), g += we, d += we;
- if (I === s.ImageKind.GRAYSCALE_1BPP && ee) {
- d = 0;
- for (let z = 0; z < q; z++) {
- d++;
- for (let ae = 0; ae < we; ae++)
- R[d++] ^= 255;
- }
- }
- const f = new Uint8Array([Y >> 24 & 255, Y >> 16 & 255, Y >> 8 & 255, Y & 255, q >> 24 & 255, q >> 16 & 255, q >> 8 & 255, q & 255, le, pe, 0, 0, 0]), v = N(R), A = y.length + p * 3 + f.length + v.length, O = new Uint8Array(A);
- let H = 0;
- return O.set(y, H), H += y.length, M("IHDR", f, O, H), H += p + f.length, M("IDATA", v, O, H), H += p + v.length, M("IEND", new Uint8Array(0), O, H), r(O, "image/png", B);
- }
- return function(I, B, ee) {
- const Y = I.kind === void 0 ? s.ImageKind.GRAYSCALE_1BPP : I.kind;
- return X(I, Y, B, ee);
- };
- }();
- class S {
- constructor() {
- this.fontSizeScale = 1, this.fontWeight = l.fontWeight, this.fontSize = 0, this.textMatrix = s.IDENTITY_MATRIX, this.fontMatrix = s.FONT_IDENTITY_MATRIX, this.leading = 0, this.textRenderingMode = s.TextRenderingMode.FILL, this.textMatrixScale = 1, this.x = 0, this.y = 0, this.lineX = 0, this.lineY = 0, this.charSpacing = 0, this.wordSpacing = 0, this.textHScale = 1, this.textRise = 0, this.fillColor = l.fillColor, this.strokeColor = "#000000", this.fillAlpha = 1, this.strokeAlpha = 1, this.lineWidth = 1, this.lineJoin = "", this.lineCap = "", this.miterLimit = 0, this.dashArray = [], this.dashPhase = 0, this.dependencies = [], this.activeClipUrl = null, this.clipGroup = null, this.maskId = "";
- }
- clone() {
- return Object.create(this);
- }
- setCurrentPoint(p, E) {
- this.x = p, this.y = E;
- }
- }
- function w(y) {
- let p = [];
- const E = [];
- for (const $ of y) {
- if ($.fn === "save") {
- p.push({
- fnId: 92,
- fn: "group",
- items: []
- }), E.push(p), p = p.at(-1).items;
- continue;
- }
- $.fn === "restore" ? p = E.pop() : p.push($);
- }
- return p;
- }
- function C(y) {
- if (Number.isInteger(y))
- return y.toString();
- const p = y.toFixed(10);
- let E = p.length - 1;
- if (p[E] !== "0")
- return p;
- do
- E--;
- while (p[E] === "0");
- return p.substring(0, p[E] === "." ? E : E + 1);
- }
- function P(y) {
- if (y[4] === 0 && y[5] === 0) {
- if (y[1] === 0 && y[2] === 0)
- return y[0] === 1 && y[3] === 1 ? "" : `scale(${C(y[0])} ${C(y[3])})`;
- if (y[0] === y[3] && y[1] === -y[2]) {
- const p = Math.acos(y[0]) * 180 / Math.PI;
- return `rotate(${C(p)})`;
- }
- } else if (y[0] === 1 && y[1] === 0 && y[2] === 0 && y[3] === 1)
- return `translate(${C(y[4])} ${C(y[5])})`;
- return `matrix(${C(y[0])} ${C(y[1])} ${C(y[2])} ${C(y[3])} ${C(y[4])} ${C(y[5])})`;
- }
- let b = 0, k = 0, F = 0;
- class x {
- constructor(p, E, $ = !1) {
- (0, n.deprecated)("The SVG back-end is no longer maintained and *may* be removed in the future."), this.svgFactory = new n.DOMSVGFactory(), this.current = new S(), this.transformMatrix = s.IDENTITY_MATRIX, this.transformStack = [], this.extraStack = [], this.commonObjs = p, this.objs = E, this.pendingClip = null, this.pendingEOFill = !1, this.embedFonts = !1, this.embeddedFonts = /* @__PURE__ */ Object.create(null), this.cssStyle = null, this.forceDataSchema = !!$, this._operatorIdMapping = [];
- for (const M in s.OPS)
- this._operatorIdMapping[s.OPS[M]] = M;
- }
- getObject(p, E = null) {
- return typeof p == "string" ? p.startsWith("g_") ? this.commonObjs.get(p) : this.objs.get(p) : E;
- }
- save() {
- this.transformStack.push(this.transformMatrix);
- const p = this.current;
- this.extraStack.push(p), this.current = p.clone();
- }
- restore() {
- this.transformMatrix = this.transformStack.pop(), this.current = this.extraStack.pop(), this.pendingClip = null, this.tgrp = null;
- }
- group(p) {
- this.save(), this.executeOpTree(p), this.restore();
- }
- loadDependencies(p) {
- const E = p.fnArray, $ = p.argsArray;
- for (let M = 0, m = E.length; M < m; M++)
- if (E[M] === s.OPS.dependency)
- for (const N of $[M]) {
- const D = N.startsWith("g_") ? this.commonObjs : this.objs, X = new Promise((G) => {
- D.get(N, G);
- });
- this.current.dependencies.push(X);
- }
- return Promise.all(this.current.dependencies);
- }
- transform(p, E, $, M, m, N) {
- const D = [p, E, $, M, m, N];
- this.transformMatrix = s.Util.transform(this.transformMatrix, D), this.tgrp = null;
- }
- getSVG(p, E) {
- this.viewport = E;
- const $ = this._initialize(E);
- return this.loadDependencies(p).then(() => (this.transformMatrix = s.IDENTITY_MATRIX, this.executeOpTree(this.convertOpList(p)), $));
- }
- convertOpList(p) {
- const E = this._operatorIdMapping, $ = p.argsArray, M = p.fnArray, m = [];
- for (let N = 0, D = M.length; N < D; N++) {
- const X = M[N];
- m.push({
- fnId: X,
- fn: E[X],
- args: $[N]
- });
- }
- return w(m);
- }
- executeOpTree(p) {
- for (const E of p) {
- const $ = E.fn, M = E.fnId, m = E.args;
- switch (M | 0) {
- case s.OPS.beginText:
- this.beginText();
- break;
- case s.OPS.dependency:
- break;
- case s.OPS.setLeading:
- this.setLeading(m);
- break;
- case s.OPS.setLeadingMoveText:
- this.setLeadingMoveText(m[0], m[1]);
- break;
- case s.OPS.setFont:
- this.setFont(m);
- break;
- case s.OPS.showText:
- this.showText(m[0]);
- break;
- case s.OPS.showSpacedText:
- this.showText(m[0]);
- break;
- case s.OPS.endText:
- this.endText();
- break;
- case s.OPS.moveText:
- this.moveText(m[0], m[1]);
- break;
- case s.OPS.setCharSpacing:
- this.setCharSpacing(m[0]);
- break;
- case s.OPS.setWordSpacing:
- this.setWordSpacing(m[0]);
- break;
- case s.OPS.setHScale:
- this.setHScale(m[0]);
- break;
- case s.OPS.setTextMatrix:
- this.setTextMatrix(m[0], m[1], m[2], m[3], m[4], m[5]);
- break;
- case s.OPS.setTextRise:
- this.setTextRise(m[0]);
- break;
- case s.OPS.setTextRenderingMode:
- this.setTextRenderingMode(m[0]);
- break;
- case s.OPS.setLineWidth:
- this.setLineWidth(m[0]);
- break;
- case s.OPS.setLineJoin:
- this.setLineJoin(m[0]);
- break;
- case s.OPS.setLineCap:
- this.setLineCap(m[0]);
- break;
- case s.OPS.setMiterLimit:
- this.setMiterLimit(m[0]);
- break;
- case s.OPS.setFillRGBColor:
- this.setFillRGBColor(m[0], m[1], m[2]);
- break;
- case s.OPS.setStrokeRGBColor:
- this.setStrokeRGBColor(m[0], m[1], m[2]);
- break;
- case s.OPS.setStrokeColorN:
- this.setStrokeColorN(m);
- break;
- case s.OPS.setFillColorN:
- this.setFillColorN(m);
- break;
- case s.OPS.shadingFill:
- this.shadingFill(m[0]);
- break;
- case s.OPS.setDash:
- this.setDash(m[0], m[1]);
- break;
- case s.OPS.setRenderingIntent:
- this.setRenderingIntent(m[0]);
- break;
- case s.OPS.setFlatness:
- this.setFlatness(m[0]);
- break;
- case s.OPS.setGState:
- this.setGState(m[0]);
- break;
- case s.OPS.fill:
- this.fill();
- break;
- case s.OPS.eoFill:
- this.eoFill();
- break;
- case s.OPS.stroke:
- this.stroke();
- break;
- case s.OPS.fillStroke:
- this.fillStroke();
- break;
- case s.OPS.eoFillStroke:
- this.eoFillStroke();
- break;
- case s.OPS.clip:
- this.clip("nonzero");
- break;
- case s.OPS.eoClip:
- this.clip("evenodd");
- break;
- case s.OPS.paintSolidColorImageMask:
- this.paintSolidColorImageMask();
- break;
- case s.OPS.paintImageXObject:
- this.paintImageXObject(m[0]);
- break;
- case s.OPS.paintInlineImageXObject:
- this.paintInlineImageXObject(m[0]);
- break;
- case s.OPS.paintImageMaskXObject:
- this.paintImageMaskXObject(m[0]);
- break;
- case s.OPS.paintFormXObjectBegin:
- this.paintFormXObjectBegin(m[0], m[1]);
- break;
- case s.OPS.paintFormXObjectEnd:
- this.paintFormXObjectEnd();
- break;
- case s.OPS.closePath:
- this.closePath();
- break;
- case s.OPS.closeStroke:
- this.closeStroke();
- break;
- case s.OPS.closeFillStroke:
- this.closeFillStroke();
- break;
- case s.OPS.closeEOFillStroke:
- this.closeEOFillStroke();
- break;
- case s.OPS.nextLine:
- this.nextLine();
- break;
- case s.OPS.transform:
- this.transform(m[0], m[1], m[2], m[3], m[4], m[5]);
- break;
- case s.OPS.constructPath:
- this.constructPath(m[0], m[1]);
- break;
- case s.OPS.endPath:
- this.endPath();
- break;
- case 92:
- this.group(E.items);
- break;
- default:
- (0, s.warn)(`Unimplemented operator ${$}`);
- break;
- }
- }
- }
- setWordSpacing(p) {
- this.current.wordSpacing = p;
- }
- setCharSpacing(p) {
- this.current.charSpacing = p;
- }
- nextLine() {
- this.moveText(0, this.current.leading);
- }
- setTextMatrix(p, E, $, M, m, N) {
- const D = this.current;
- D.textMatrix = D.lineMatrix = [p, E, $, M, m, N], D.textMatrixScale = Math.hypot(p, E), D.x = D.lineX = 0, D.y = D.lineY = 0, D.xcoords = [], D.ycoords = [], D.tspan = this.svgFactory.createElement("svg:tspan"), D.tspan.setAttributeNS(null, "font-family", D.fontFamily), D.tspan.setAttributeNS(null, "font-size", `${C(D.fontSize)}px`), D.tspan.setAttributeNS(null, "y", C(-D.y)), D.txtElement = this.svgFactory.createElement("svg:text"), D.txtElement.append(D.tspan);
- }
- beginText() {
- const p = this.current;
- p.x = p.lineX = 0, p.y = p.lineY = 0, p.textMatrix = s.IDENTITY_MATRIX, p.lineMatrix = s.IDENTITY_MATRIX, p.textMatrixScale = 1, p.tspan = this.svgFactory.createElement("svg:tspan"), p.txtElement = this.svgFactory.createElement("svg:text"), p.txtgrp = this.svgFactory.createElement("svg:g"), p.xcoords = [], p.ycoords = [];
- }
- moveText(p, E) {
- const $ = this.current;
- $.x = $.lineX += p, $.y = $.lineY += E, $.xcoords = [], $.ycoords = [], $.tspan = this.svgFactory.createElement("svg:tspan"), $.tspan.setAttributeNS(null, "font-family", $.fontFamily), $.tspan.setAttributeNS(null, "font-size", `${C($.fontSize)}px`), $.tspan.setAttributeNS(null, "y", C(-$.y));
- }
- showText(p) {
- const E = this.current, $ = E.font, M = E.fontSize;
- if (M === 0)
- return;
- const m = E.fontSizeScale, N = E.charSpacing, D = E.wordSpacing, X = E.fontDirection, G = E.textHScale * X, I = $.vertical, B = I ? 1 : -1, ee = $.defaultVMetrics, Y = M * E.fontMatrix[0];
- let q = 0;
- for (const we of p) {
- if (we === null) {
- q += X * D;
- continue;
- } else if (typeof we == "number") {
- q += B * we * M / 1e3;
- continue;
- }
- const be = (we.isSpace ? D : 0) + N, R = we.fontChar;
- let d, g, f = we.width;
- if (I) {
- let A;
- const O = we.vmetric || ee;
- A = we.vmetric ? O[1] : f * 0.5, A = -A * Y;
- const H = O[2] * Y;
- f = O ? -O[0] : f, d = A / m, g = (q + H) / m;
- } else
- d = q / m, g = 0;
- (we.isInFont || $.missingFile) && (E.xcoords.push(E.x + d), I && E.ycoords.push(-E.y + g), E.tspan.textContent += R);
- const v = I ? f * Y - be * X : f * Y + be * X;
- q += v;
- }
- E.tspan.setAttributeNS(null, "x", E.xcoords.map(C).join(" ")), I ? E.tspan.setAttributeNS(null, "y", E.ycoords.map(C).join(" ")) : E.tspan.setAttributeNS(null, "y", C(-E.y)), I ? E.y -= q : E.x += q * G, E.tspan.setAttributeNS(null, "font-family", E.fontFamily), E.tspan.setAttributeNS(null, "font-size", `${C(E.fontSize)}px`), E.fontStyle !== l.fontStyle && E.tspan.setAttributeNS(null, "font-style", E.fontStyle), E.fontWeight !== l.fontWeight && E.tspan.setAttributeNS(null, "font-weight", E.fontWeight);
- const le = E.textRenderingMode & s.TextRenderingMode.FILL_STROKE_MASK;
- if (le === s.TextRenderingMode.FILL || le === s.TextRenderingMode.FILL_STROKE ? (E.fillColor !== l.fillColor && E.tspan.setAttributeNS(null, "fill", E.fillColor), E.fillAlpha < 1 && E.tspan.setAttributeNS(null, "fill-opacity", E.fillAlpha)) : E.textRenderingMode === s.TextRenderingMode.ADD_TO_PATH ? E.tspan.setAttributeNS(null, "fill", "transparent") : E.tspan.setAttributeNS(null, "fill", "none"), le === s.TextRenderingMode.STROKE || le === s.TextRenderingMode.FILL_STROKE) {
- const we = 1 / (E.textMatrixScale || 1);
- this._setStrokeAttributes(E.tspan, we);
- }
- let pe = E.textMatrix;
- E.textRise !== 0 && (pe = pe.slice(), pe[5] += E.textRise), E.txtElement.setAttributeNS(null, "transform", `${P(pe)} scale(${C(G)}, -1)`), E.txtElement.setAttributeNS(h, "xml:space", "preserve"), E.txtElement.append(E.tspan), E.txtgrp.append(E.txtElement), this._ensureTransformGroup().append(E.txtElement);
- }
- setLeadingMoveText(p, E) {
- this.setLeading(-E), this.moveText(p, E);
- }
- addFontStyle(p) {
- if (!p.data)
- throw new Error('addFontStyle: No font data available, ensure that the "fontExtraProperties" API parameter is set.');
- this.cssStyle || (this.cssStyle = this.svgFactory.createElement("svg:style"), this.cssStyle.setAttributeNS(null, "type", "text/css"), this.defs.append(this.cssStyle));
- const E = r(p.data, p.mimetype, this.forceDataSchema);
- this.cssStyle.textContent += `@font-face { font-family: "${p.loadedName}"; src: url(${E}); }
-`;
- }
- setFont(p) {
- const E = this.current, $ = this.commonObjs.get(p[0]);
- let M = p[1];
- E.font = $, this.embedFonts && !$.missingFile && !this.embeddedFonts[$.loadedName] && (this.addFontStyle($), this.embeddedFonts[$.loadedName] = $), E.fontMatrix = $.fontMatrix || s.FONT_IDENTITY_MATRIX;
- let m = "normal";
- $.black ? m = "900" : $.bold && (m = "bold");
- const N = $.italic ? "italic" : "normal";
- M < 0 ? (M = -M, E.fontDirection = -1) : E.fontDirection = 1, E.fontSize = M, E.fontFamily = $.loadedName, E.fontWeight = m, E.fontStyle = N, E.tspan = this.svgFactory.createElement("svg:tspan"), E.tspan.setAttributeNS(null, "y", C(-E.y)), E.xcoords = [], E.ycoords = [];
- }
- endText() {
- var E;
- const p = this.current;
- p.textRenderingMode & s.TextRenderingMode.ADD_TO_PATH_FLAG && ((E = p.txtElement) != null && E.hasChildNodes()) && (p.element = p.txtElement, this.clip("nonzero"), this.endPath());
- }
- setLineWidth(p) {
- p > 0 && (this.current.lineWidth = p);
- }
- setLineCap(p) {
- this.current.lineCap = c[p];
- }
- setLineJoin(p) {
- this.current.lineJoin = o[p];
- }
- setMiterLimit(p) {
- this.current.miterLimit = p;
- }
- setStrokeAlpha(p) {
- this.current.strokeAlpha = p;
- }
- setStrokeRGBColor(p, E, $) {
- this.current.strokeColor = s.Util.makeHexColor(p, E, $);
- }
- setFillAlpha(p) {
- this.current.fillAlpha = p;
- }
- setFillRGBColor(p, E, $) {
- this.current.fillColor = s.Util.makeHexColor(p, E, $), this.current.tspan = this.svgFactory.createElement("svg:tspan"), this.current.xcoords = [], this.current.ycoords = [];
- }
- setStrokeColorN(p) {
- this.current.strokeColor = this._makeColorN_Pattern(p);
- }
- setFillColorN(p) {
- this.current.fillColor = this._makeColorN_Pattern(p);
- }
- shadingFill(p) {
- const {
- width: E,
- height: $
- } = this.viewport, M = s.Util.inverseTransform(this.transformMatrix), [m, N, D, X] = s.Util.getAxialAlignedBoundingBox([0, 0, E, $], M), G = this.svgFactory.createElement("svg:rect");
- G.setAttributeNS(null, "x", m), G.setAttributeNS(null, "y", N), G.setAttributeNS(null, "width", D - m), G.setAttributeNS(null, "height", X - N), G.setAttributeNS(null, "fill", this._makeShadingPattern(p)), this.current.fillAlpha < 1 && G.setAttributeNS(null, "fill-opacity", this.current.fillAlpha), this._ensureTransformGroup().append(G);
- }
- _makeColorN_Pattern(p) {
- return p[0] === "TilingPattern" ? this._makeTilingPattern(p) : this._makeShadingPattern(p);
- }
- _makeTilingPattern(p) {
- const E = p[1], $ = p[2], M = p[3] || s.IDENTITY_MATRIX, [m, N, D, X] = p[4], G = p[5], I = p[6], B = p[7], ee = `shading${F++}`, [Y, q, le, pe] = s.Util.normalizeRect([...s.Util.applyTransform([m, N], M), ...s.Util.applyTransform([D, X], M)]), [we, be] = s.Util.singularValueDecompose2dScale(M), R = G * we, d = I * be, g = this.svgFactory.createElement("svg:pattern");
- g.setAttributeNS(null, "id", ee), g.setAttributeNS(null, "patternUnits", "userSpaceOnUse"), g.setAttributeNS(null, "width", R), g.setAttributeNS(null, "height", d), g.setAttributeNS(null, "x", `${Y}`), g.setAttributeNS(null, "y", `${q}`);
- const f = this.svg, v = this.transformMatrix, A = this.current.fillColor, O = this.current.strokeColor, H = this.svgFactory.create(le - Y, pe - q);
- if (this.svg = H, this.transformMatrix = M, B === 2) {
- const z = s.Util.makeHexColor(...E);
- this.current.fillColor = z, this.current.strokeColor = z;
- }
- return this.executeOpTree(this.convertOpList($)), this.svg = f, this.transformMatrix = v, this.current.fillColor = A, this.current.strokeColor = O, g.append(H.childNodes[0]), this.defs.append(g), `url(#${ee})`;
- }
- _makeShadingPattern(p) {
- switch (typeof p == "string" && (p = this.objs.get(p)), p[0]) {
- case "RadialAxial":
- const E = `shading${F++}`, $ = p[3];
- let M;
- switch (p[1]) {
- case "axial":
- const m = p[4], N = p[5];
- M = this.svgFactory.createElement("svg:linearGradient"), M.setAttributeNS(null, "id", E), M.setAttributeNS(null, "gradientUnits", "userSpaceOnUse"), M.setAttributeNS(null, "x1", m[0]), M.setAttributeNS(null, "y1", m[1]), M.setAttributeNS(null, "x2", N[0]), M.setAttributeNS(null, "y2", N[1]);
- break;
- case "radial":
- const D = p[4], X = p[5], G = p[6], I = p[7];
- M = this.svgFactory.createElement("svg:radialGradient"), M.setAttributeNS(null, "id", E), M.setAttributeNS(null, "gradientUnits", "userSpaceOnUse"), M.setAttributeNS(null, "cx", X[0]), M.setAttributeNS(null, "cy", X[1]), M.setAttributeNS(null, "r", I), M.setAttributeNS(null, "fx", D[0]), M.setAttributeNS(null, "fy", D[1]), M.setAttributeNS(null, "fr", G);
- break;
- default:
- throw new Error(`Unknown RadialAxial type: ${p[1]}`);
- }
- for (const m of $) {
- const N = this.svgFactory.createElement("svg:stop");
- N.setAttributeNS(null, "offset", m[0]), N.setAttributeNS(null, "stop-color", m[1]), M.append(N);
- }
- return this.defs.append(M), `url(#${E})`;
- case "Mesh":
- return (0, s.warn)("Unimplemented pattern Mesh"), null;
- case "Dummy":
- return "hotpink";
- default:
- throw new Error(`Unknown IR type: ${p[0]}`);
- }
- }
- setDash(p, E) {
- this.current.dashArray = p, this.current.dashPhase = E;
- }
- constructPath(p, E) {
- const $ = this.current;
- let M = $.x, m = $.y, N = [], D = 0;
- for (const X of p)
- switch (X | 0) {
- case s.OPS.rectangle:
- M = E[D++], m = E[D++];
- const G = E[D++], I = E[D++], B = M + G, ee = m + I;
- N.push("M", C(M), C(m), "L", C(B), C(m), "L", C(B), C(ee), "L", C(M), C(ee), "Z");
- break;
- case s.OPS.moveTo:
- M = E[D++], m = E[D++], N.push("M", C(M), C(m));
- break;
- case s.OPS.lineTo:
- M = E[D++], m = E[D++], N.push("L", C(M), C(m));
- break;
- case s.OPS.curveTo:
- M = E[D + 4], m = E[D + 5], N.push("C", C(E[D]), C(E[D + 1]), C(E[D + 2]), C(E[D + 3]), C(M), C(m)), D += 6;
- break;
- case s.OPS.curveTo2:
- N.push("C", C(M), C(m), C(E[D]), C(E[D + 1]), C(E[D + 2]), C(E[D + 3])), M = E[D + 2], m = E[D + 3], D += 4;
- break;
- case s.OPS.curveTo3:
- M = E[D + 2], m = E[D + 3], N.push("C", C(E[D]), C(E[D + 1]), C(M), C(m), C(M), C(m)), D += 4;
- break;
- case s.OPS.closePath:
- N.push("Z");
- break;
- }
- N = N.join(" "), $.path && p.length > 0 && p[0] !== s.OPS.rectangle && p[0] !== s.OPS.moveTo ? N = $.path.getAttributeNS(null, "d") + N : ($.path = this.svgFactory.createElement("svg:path"), this._ensureTransformGroup().append($.path)), $.path.setAttributeNS(null, "d", N), $.path.setAttributeNS(null, "fill", "none"), $.element = $.path, $.setCurrentPoint(M, m);
- }
- endPath() {
- const p = this.current;
- if (p.path = null, !this.pendingClip)
- return;
- if (!p.element) {
- this.pendingClip = null;
- return;
- }
- const E = `clippath${b++}`, $ = this.svgFactory.createElement("svg:clipPath");
- $.setAttributeNS(null, "id", E), $.setAttributeNS(null, "transform", P(this.transformMatrix));
- const M = p.element.cloneNode(!0);
- if (this.pendingClip === "evenodd" ? M.setAttributeNS(null, "clip-rule", "evenodd") : M.setAttributeNS(null, "clip-rule", "nonzero"), this.pendingClip = null, $.append(M), this.defs.append($), p.activeClipUrl) {
- p.clipGroup = null;
- for (const m of this.extraStack)
- m.clipGroup = null;
- $.setAttributeNS(null, "clip-path", p.activeClipUrl);
- }
- p.activeClipUrl = `url(#${E})`, this.tgrp = null;
- }
- clip(p) {
- this.pendingClip = p;
- }
- closePath() {
- const p = this.current;
- if (p.path) {
- const E = `${p.path.getAttributeNS(null, "d")}Z`;
- p.path.setAttributeNS(null, "d", E);
- }
- }
- setLeading(p) {
- this.current.leading = -p;
- }
- setTextRise(p) {
- this.current.textRise = p;
- }
- setTextRenderingMode(p) {
- this.current.textRenderingMode = p;
- }
- setHScale(p) {
- this.current.textHScale = p / 100;
- }
- setRenderingIntent(p) {
- }
- setFlatness(p) {
- }
- setGState(p) {
- for (const [E, $] of p)
- switch (E) {
- case "LW":
- this.setLineWidth($);
- break;
- case "LC":
- this.setLineCap($);
- break;
- case "LJ":
- this.setLineJoin($);
- break;
- case "ML":
- this.setMiterLimit($);
- break;
- case "D":
- this.setDash($[0], $[1]);
- break;
- case "RI":
- this.setRenderingIntent($);
- break;
- case "FL":
- this.setFlatness($);
- break;
- case "Font":
- this.setFont($);
- break;
- case "CA":
- this.setStrokeAlpha($);
- break;
- case "ca":
- this.setFillAlpha($);
- break;
- default:
- (0, s.warn)(`Unimplemented graphic state operator ${E}`);
- break;
- }
- }
- fill() {
- const p = this.current;
- p.element && (p.element.setAttributeNS(null, "fill", p.fillColor), p.element.setAttributeNS(null, "fill-opacity", p.fillAlpha), this.endPath());
- }
- stroke() {
- const p = this.current;
- p.element && (this._setStrokeAttributes(p.element), p.element.setAttributeNS(null, "fill", "none"), this.endPath());
- }
- _setStrokeAttributes(p, E = 1) {
- const $ = this.current;
- let M = $.dashArray;
- E !== 1 && M.length > 0 && (M = M.map(function(m) {
- return E * m;
- })), p.setAttributeNS(null, "stroke", $.strokeColor), p.setAttributeNS(null, "stroke-opacity", $.strokeAlpha), p.setAttributeNS(null, "stroke-miterlimit", C($.miterLimit)), p.setAttributeNS(null, "stroke-linecap", $.lineCap), p.setAttributeNS(null, "stroke-linejoin", $.lineJoin), p.setAttributeNS(null, "stroke-width", C(E * $.lineWidth) + "px"), p.setAttributeNS(null, "stroke-dasharray", M.map(C).join(" ")), p.setAttributeNS(null, "stroke-dashoffset", C(E * $.dashPhase) + "px");
- }
- eoFill() {
- var p;
- (p = this.current.element) == null || p.setAttributeNS(null, "fill-rule", "evenodd"), this.fill();
- }
- fillStroke() {
- this.stroke(), this.fill();
- }
- eoFillStroke() {
- var p;
- (p = this.current.element) == null || p.setAttributeNS(null, "fill-rule", "evenodd"), this.fillStroke();
- }
- closeStroke() {
- this.closePath(), this.stroke();
- }
- closeFillStroke() {
- this.closePath(), this.fillStroke();
- }
- closeEOFillStroke() {
- this.closePath(), this.eoFillStroke();
- }
- paintSolidColorImageMask() {
- const p = this.svgFactory.createElement("svg:rect");
- p.setAttributeNS(null, "x", "0"), p.setAttributeNS(null, "y", "0"), p.setAttributeNS(null, "width", "1px"), p.setAttributeNS(null, "height", "1px"), p.setAttributeNS(null, "fill", this.current.fillColor), this._ensureTransformGroup().append(p);
- }
- paintImageXObject(p) {
- const E = this.getObject(p);
- if (!E) {
- (0, s.warn)(`Dependent image with object ID ${p} is not ready yet`);
- return;
- }
- this.paintInlineImageXObject(E);
- }
- paintInlineImageXObject(p, E) {
- const $ = p.width, M = p.height, m = T(p, this.forceDataSchema, !!E), N = this.svgFactory.createElement("svg:rect");
- N.setAttributeNS(null, "x", "0"), N.setAttributeNS(null, "y", "0"), N.setAttributeNS(null, "width", C($)), N.setAttributeNS(null, "height", C(M)), this.current.element = N, this.clip("nonzero");
- const D = this.svgFactory.createElement("svg:image");
- D.setAttributeNS(_, "xlink:href", m), D.setAttributeNS(null, "x", "0"), D.setAttributeNS(null, "y", C(-M)), D.setAttributeNS(null, "width", C($) + "px"), D.setAttributeNS(null, "height", C(M) + "px"), D.setAttributeNS(null, "transform", `scale(${C(1 / $)} ${C(-1 / M)})`), E ? E.append(D) : this._ensureTransformGroup().append(D);
- }
- paintImageMaskXObject(p) {
- const E = this.getObject(p.data, p);
- if (E.bitmap) {
- (0, s.warn)("paintImageMaskXObject: ImageBitmap support is not implemented, ensure that the `isOffscreenCanvasSupported` API parameter is disabled.");
- return;
- }
- const $ = this.current, M = E.width, m = E.height, N = $.fillColor;
- $.maskId = `mask${k++}`;
- const D = this.svgFactory.createElement("svg:mask");
- D.setAttributeNS(null, "id", $.maskId);
- const X = this.svgFactory.createElement("svg:rect");
- X.setAttributeNS(null, "x", "0"), X.setAttributeNS(null, "y", "0"), X.setAttributeNS(null, "width", C(M)), X.setAttributeNS(null, "height", C(m)), X.setAttributeNS(null, "fill", N), X.setAttributeNS(null, "mask", `url(#${$.maskId})`), this.defs.append(D), this._ensureTransformGroup().append(X), this.paintInlineImageXObject(E, D);
- }
- paintFormXObjectBegin(p, E) {
- if (Array.isArray(p) && p.length === 6 && this.transform(p[0], p[1], p[2], p[3], p[4], p[5]), E) {
- const $ = E[2] - E[0], M = E[3] - E[1], m = this.svgFactory.createElement("svg:rect");
- m.setAttributeNS(null, "x", E[0]), m.setAttributeNS(null, "y", E[1]), m.setAttributeNS(null, "width", C($)), m.setAttributeNS(null, "height", C(M)), this.current.element = m, this.clip("nonzero"), this.endPath();
- }
- }
- paintFormXObjectEnd() {
- }
- _initialize(p) {
- const E = this.svgFactory.create(p.width, p.height), $ = this.svgFactory.createElement("svg:defs");
- E.append($), this.defs = $;
- const M = this.svgFactory.createElement("svg:g");
- return M.setAttributeNS(null, "transform", P(p.transform)), E.append(M), this.svg = M, E;
- }
- _ensureClipGroup() {
- if (!this.current.clipGroup) {
- const p = this.svgFactory.createElement("svg:g");
- p.setAttributeNS(null, "clip-path", this.current.activeClipUrl), this.svg.append(p), this.current.clipGroup = p;
- }
- return this.current.clipGroup;
- }
- _ensureTransformGroup() {
- return this.tgrp || (this.tgrp = this.svgFactory.createElement("svg:g"), this.tgrp.setAttributeNS(null, "transform", P(this.transformMatrix)), this.current.activeClipUrl ? this._ensureClipGroup().append(this.tgrp) : this.svg.append(this.tgrp)), this.tgrp;
- }
- }
- e.SVGGraphics = x;
- },
- /* 25 */
- /***/
- (t, e) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.XfaText = void 0;
- class i {
- static textContent(s) {
- const l = [], h = {
- items: l,
- styles: /* @__PURE__ */ Object.create(null)
- };
- function _(c) {
- var T;
- if (!c)
- return;
- let o = null;
- const r = c.name;
- if (r === "#text")
- o = c.value;
- else if (i.shouldBuildText(r))
- (T = c == null ? void 0 : c.attributes) != null && T.textContent ? o = c.attributes.textContent : c.value && (o = c.value);
- else
- return;
- if (o !== null && l.push({
- str: o
- }), !!c.children)
- for (const S of c.children)
- _(S);
- }
- return _(s), h;
- }
- static shouldBuildText(s) {
- return !(s === "textarea" || s === "input" || s === "option" || s === "select");
- }
- }
- e.XfaText = i;
- },
- /* 26 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.TextLayerRenderTask = void 0, e.renderTextLayer = P, e.updateTextLayer = b;
- var n = i(1), s = i(6);
- const l = 1e5, h = 30, _ = 0.8, c = /* @__PURE__ */ new Map();
- function o(k, F) {
- let x;
- if (F && n.FeatureTest.isOffscreenCanvasSupported)
- x = new OffscreenCanvas(k, k).getContext("2d", {
- alpha: !1
- });
- else {
- const y = document.createElement("canvas");
- y.width = y.height = k, x = y.getContext("2d", {
- alpha: !1
- });
- }
- return x;
- }
- function r(k, F) {
- const x = c.get(k);
- if (x)
- return x;
- const y = o(h, F);
- y.font = `${h}px ${k}`;
- const p = y.measureText("");
- let E = p.fontBoundingBoxAscent, $ = Math.abs(p.fontBoundingBoxDescent);
- if (E) {
- const m = E / (E + $);
- return c.set(k, m), y.canvas.width = y.canvas.height = 0, m;
- }
- y.strokeStyle = "red", y.clearRect(0, 0, h, h), y.strokeText("g", 0, 0);
- let M = y.getImageData(0, 0, h, h).data;
- $ = 0;
- for (let m = M.length - 1 - 3; m >= 0; m -= 4)
- if (M[m] > 0) {
- $ = Math.ceil(m / 4 / h);
- break;
- }
- y.clearRect(0, 0, h, h), y.strokeText("A", 0, h), M = y.getImageData(0, 0, h, h).data, E = 0;
- for (let m = 0, N = M.length; m < N; m += 4)
- if (M[m] > 0) {
- E = h - Math.floor(m / 4 / h);
- break;
- }
- if (y.canvas.width = y.canvas.height = 0, E) {
- const m = E / (E + $);
- return c.set(k, m), m;
- }
- return c.set(k, _), _;
- }
- function T(k, F, x) {
- const y = document.createElement("span"), p = {
- angle: 0,
- canvasWidth: 0,
- hasText: F.str !== "",
- hasEOL: F.hasEOL,
- fontSize: 0
- };
- k._textDivs.push(y);
- const E = n.Util.transform(k._transform, F.transform);
- let $ = Math.atan2(E[1], E[0]);
- const M = x[F.fontName];
- M.vertical && ($ += Math.PI / 2);
- const m = Math.hypot(E[2], E[3]), N = m * r(M.fontFamily, k._isOffscreenCanvasSupported);
- let D, X;
- $ === 0 ? (D = E[4], X = E[5] - N) : (D = E[4] + N * Math.sin($), X = E[5] - N * Math.cos($));
- const G = "calc(var(--scale-factor)*", I = y.style;
- k._container === k._rootContainer ? (I.left = `${(100 * D / k._pageWidth).toFixed(2)}%`, I.top = `${(100 * X / k._pageHeight).toFixed(2)}%`) : (I.left = `${G}${D.toFixed(2)}px)`, I.top = `${G}${X.toFixed(2)}px)`), I.fontSize = `${G}${m.toFixed(2)}px)`, I.fontFamily = M.fontFamily, p.fontSize = m, y.setAttribute("role", "presentation"), y.textContent = F.str, y.dir = F.dir, k._fontInspectorEnabled && (y.dataset.fontName = F.fontName), $ !== 0 && (p.angle = $ * (180 / Math.PI));
- let B = !1;
- if (F.str.length > 1)
- B = !0;
- else if (F.str !== " " && F.transform[0] !== F.transform[3]) {
- const ee = Math.abs(F.transform[0]), Y = Math.abs(F.transform[3]);
- ee !== Y && Math.max(ee, Y) / Math.min(ee, Y) > 1.5 && (B = !0);
- }
- B && (p.canvasWidth = M.vertical ? F.height : F.width), k._textDivProperties.set(y, p), k._isReadableStream && k._layoutText(y);
- }
- function S(k) {
- const {
- div: F,
- scale: x,
- properties: y,
- ctx: p,
- prevFontSize: E,
- prevFontFamily: $
- } = k, {
- style: M
- } = F;
- let m = "";
- if (y.canvasWidth !== 0 && y.hasText) {
- const {
- fontFamily: N
- } = M, {
- canvasWidth: D,
- fontSize: X
- } = y;
- (E !== X || $ !== N) && (p.font = `${X * x}px ${N}`, k.prevFontSize = X, k.prevFontFamily = N);
- const {
- width: G
- } = p.measureText(F.textContent);
- G > 0 && (m = `scaleX(${D * x / G})`);
- }
- y.angle !== 0 && (m = `rotate(${y.angle}deg) ${m}`), m.length > 0 && (M.transform = m);
- }
- function w(k) {
- if (k._canceled)
- return;
- const F = k._textDivs, x = k._capability;
- if (F.length > l) {
- x.resolve();
- return;
- }
- if (!k._isReadableStream)
- for (const p of F)
- k._layoutText(p);
- x.resolve();
- }
- class C {
- constructor({
- textContentSource: F,
- container: x,
- viewport: y,
- textDivs: p,
- textDivProperties: E,
- textContentItemsStr: $,
- isOffscreenCanvasSupported: M
- }) {
- var G;
- this._textContentSource = F, this._isReadableStream = F instanceof ReadableStream, this._container = this._rootContainer = x, this._textDivs = p || [], this._textContentItemsStr = $ || [], this._isOffscreenCanvasSupported = M, this._fontInspectorEnabled = !!((G = globalThis.FontInspector) != null && G.enabled), this._reader = null, this._textDivProperties = E || /* @__PURE__ */ new WeakMap(), this._canceled = !1, this._capability = new n.PromiseCapability(), this._layoutTextParams = {
- prevFontSize: null,
- prevFontFamily: null,
- div: null,
- scale: y.scale * (globalThis.devicePixelRatio || 1),
- properties: null,
- ctx: o(0, M)
- };
- const {
- pageWidth: m,
- pageHeight: N,
- pageX: D,
- pageY: X
- } = y.rawDims;
- this._transform = [1, 0, 0, -1, -D, X + N], this._pageWidth = m, this._pageHeight = N, (0, s.setLayerDimensions)(x, y), this._capability.promise.finally(() => {
- this._layoutTextParams = null;
- }).catch(() => {
- });
- }
- get promise() {
- return this._capability.promise;
- }
- cancel() {
- this._canceled = !0, this._reader && (this._reader.cancel(new n.AbortException("TextLayer task cancelled.")).catch(() => {
- }), this._reader = null), this._capability.reject(new n.AbortException("TextLayer task cancelled."));
- }
- _processItems(F, x) {
- for (const y of F) {
- if (y.str === void 0) {
- if (y.type === "beginMarkedContentProps" || y.type === "beginMarkedContent") {
- const p = this._container;
- this._container = document.createElement("span"), this._container.classList.add("markedContent"), y.id !== null && this._container.setAttribute("id", `${y.id}`), p.append(this._container);
- } else
- y.type === "endMarkedContent" && (this._container = this._container.parentNode);
- continue;
- }
- this._textContentItemsStr.push(y.str), T(this, y, x);
- }
- }
- _layoutText(F) {
- const x = this._layoutTextParams.properties = this._textDivProperties.get(F);
- if (this._layoutTextParams.div = F, S(this._layoutTextParams), x.hasText && this._container.append(F), x.hasEOL) {
- const y = document.createElement("br");
- y.setAttribute("role", "presentation"), this._container.append(y);
- }
- }
- _render() {
- const F = new n.PromiseCapability();
- let x = /* @__PURE__ */ Object.create(null);
- if (this._isReadableStream) {
- const y = () => {
- this._reader.read().then(({
- value: p,
- done: E
- }) => {
- if (E) {
- F.resolve();
- return;
- }
- Object.assign(x, p.styles), this._processItems(p.items, x), y();
- }, F.reject);
- };
- this._reader = this._textContentSource.getReader(), y();
- } else if (this._textContentSource) {
- const {
- items: y,
- styles: p
- } = this._textContentSource;
- this._processItems(y, p), F.resolve();
- } else
- throw new Error('No "textContentSource" parameter specified.');
- F.promise.then(() => {
- x = null, w(this);
- }, this._capability.reject);
- }
- }
- e.TextLayerRenderTask = C;
- function P(k) {
- !k.textContentSource && (k.textContent || k.textContentStream) && ((0, s.deprecated)("The TextLayerRender `textContent`/`textContentStream` parameters will be removed in the future, please use `textContentSource` instead."), k.textContentSource = k.textContent || k.textContentStream);
- const {
- container: F,
- viewport: x
- } = k, y = getComputedStyle(F), p = y.getPropertyValue("visibility"), E = parseFloat(y.getPropertyValue("--scale-factor"));
- p === "visible" && (!E || Math.abs(E - x.scale) > 1e-5) && console.error("The `--scale-factor` CSS-variable must be set, to the same value as `viewport.scale`, either on the `container`-element itself or higher up in the DOM.");
- const $ = new C(k);
- return $._render(), $;
- }
- function b({
- container: k,
- viewport: F,
- textDivs: x,
- textDivProperties: y,
- isOffscreenCanvasSupported: p,
- mustRotate: E = !0,
- mustRescale: $ = !0
- }) {
- if (E && (0, s.setLayerDimensions)(k, {
- rotation: F.rotation
- }), $) {
- const M = o(0, p), N = {
- prevFontSize: null,
- prevFontFamily: null,
- div: null,
- scale: F.scale * (globalThis.devicePixelRatio || 1),
- properties: null,
- ctx: M
- };
- for (const D of x)
- N.properties = y.get(D), N.div = D, S(N);
- }
- }
- },
- /* 27 */
- /***/
- (t, e, i) => {
- var r, T, S, w, C, P, b, k, F, x, y, nn, E, Rt, M, sn, N, rn;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.AnnotationEditorLayer = void 0;
- var n = i(1), s = i(4), l = i(28), h = i(33), _ = i(6), c = i(34);
- const X = class X {
- constructor({
- uiManager: I,
- pageIndex: B,
- div: ee,
- accessibilityManager: Y,
- annotationLayer: q,
- viewport: le,
- l10n: pe
- }) {
- W(this, y);
- W(this, E);
- W(this, M);
- W(this, N);
- W(this, r, void 0);
- W(this, T, !1);
- W(this, S, null);
- W(this, w, this.pointerup.bind(this));
- W(this, C, this.pointerdown.bind(this));
- W(this, P, /* @__PURE__ */ new Map());
- W(this, b, !1);
- W(this, k, !1);
- W(this, F, !1);
- W(this, x, void 0);
- const we = [l.FreeTextEditor, h.InkEditor, c.StampEditor];
- if (!X._initialized) {
- X._initialized = !0;
- for (const be of we)
- be.initialize(pe);
- }
- I.registerEditorTypes(we), oe(this, x, I), this.pageIndex = B, this.div = ee, oe(this, r, Y), oe(this, S, q), this.viewport = le, a(this, x).addLayer(this);
- }
- get isEmpty() {
- return a(this, P).size === 0;
- }
- updateToolbar(I) {
- a(this, x).updateToolbar(I);
- }
- updateMode(I = a(this, x).getMode()) {
- K(this, N, rn).call(this), I === n.AnnotationEditorType.INK ? (this.addInkEditorIfNeeded(!1), this.disableClick()) : this.enableClick(), I !== n.AnnotationEditorType.NONE && (this.div.classList.toggle("freeTextEditing", I === n.AnnotationEditorType.FREETEXT), this.div.classList.toggle("inkEditing", I === n.AnnotationEditorType.INK), this.div.classList.toggle("stampEditing", I === n.AnnotationEditorType.STAMP), this.div.hidden = !1);
- }
- addInkEditorIfNeeded(I) {
- if (!I && a(this, x).getMode() !== n.AnnotationEditorType.INK)
- return;
- if (!I) {
- for (const ee of a(this, P).values())
- if (ee.isEmpty()) {
- ee.setInBackground();
- return;
- }
- }
- K(this, E, Rt).call(this, {
- offsetX: 0,
- offsetY: 0
- }, !1).setInBackground();
- }
- setEditingState(I) {
- a(this, x).setEditingState(I);
- }
- addCommands(I) {
- a(this, x).addCommands(I);
- }
- enable() {
- this.div.style.pointerEvents = "auto";
- const I = /* @__PURE__ */ new Set();
- for (const ee of a(this, P).values())
- ee.enableEditing(), ee.annotationElementId && I.add(ee.annotationElementId);
- if (!a(this, S))
- return;
- const B = a(this, S).getEditableAnnotations();
- for (const ee of B) {
- if (ee.hide(), a(this, x).isDeletedAnnotationElement(ee.data.id) || I.has(ee.data.id))
- continue;
- const Y = this.deserialize(ee);
- Y && (this.addOrRebuild(Y), Y.enableEditing());
- }
- }
- disable() {
- var B;
- oe(this, F, !0), this.div.style.pointerEvents = "none";
- const I = /* @__PURE__ */ new Set();
- for (const ee of a(this, P).values()) {
- if (ee.disableEditing(), !ee.annotationElementId || ee.serialize() !== null) {
- I.add(ee.annotationElementId);
- continue;
- }
- (B = this.getEditableAnnotation(ee.annotationElementId)) == null || B.show(), ee.remove();
- }
- if (a(this, S)) {
- const ee = a(this, S).getEditableAnnotations();
- for (const Y of ee) {
- const {
- id: q
- } = Y.data;
- I.has(q) || a(this, x).isDeletedAnnotationElement(q) || Y.show();
- }
- }
- K(this, N, rn).call(this), this.isEmpty && (this.div.hidden = !0), oe(this, F, !1);
- }
- getEditableAnnotation(I) {
- var B;
- return ((B = a(this, S)) == null ? void 0 : B.getEditableAnnotation(I)) || null;
- }
- setActiveEditor(I) {
- a(this, x).getActive() !== I && a(this, x).setActiveEditor(I);
- }
- enableClick() {
- this.div.addEventListener("pointerdown", a(this, C)), this.div.addEventListener("pointerup", a(this, w));
- }
- disableClick() {
- this.div.removeEventListener("pointerdown", a(this, C)), this.div.removeEventListener("pointerup", a(this, w));
- }
- attach(I) {
- a(this, P).set(I.id, I);
- const {
- annotationElementId: B
- } = I;
- B && a(this, x).isDeletedAnnotationElement(B) && a(this, x).removeDeletedAnnotationElement(I);
- }
- detach(I) {
- var B;
- a(this, P).delete(I.id), (B = a(this, r)) == null || B.removePointerInTextLayer(I.contentDiv), !a(this, F) && I.annotationElementId && a(this, x).addDeletedAnnotationElement(I);
- }
- remove(I) {
- this.detach(I), a(this, x).removeEditor(I), I.div.contains(document.activeElement) && setTimeout(() => {
- a(this, x).focusMainContainer();
- }, 0), I.div.remove(), I.isAttachedToDOM = !1, a(this, k) || this.addInkEditorIfNeeded(!1);
- }
- changeParent(I) {
- var B;
- I.parent !== this && (I.annotationElementId && (a(this, x).addDeletedAnnotationElement(I.annotationElementId), s.AnnotationEditor.deleteAnnotationElement(I), I.annotationElementId = null), this.attach(I), (B = I.parent) == null || B.detach(I), I.setParent(this), I.div && I.isAttachedToDOM && (I.div.remove(), this.div.append(I.div)));
- }
- add(I) {
- if (this.changeParent(I), a(this, x).addEditor(I), this.attach(I), !I.isAttachedToDOM) {
- const B = I.render();
- this.div.append(B), I.isAttachedToDOM = !0;
- }
- I.fixAndSetPosition(), I.onceAdded(), a(this, x).addToAnnotationStorage(I);
- }
- moveEditorInDOM(I) {
- var ee;
- if (!I.isAttachedToDOM)
- return;
- const {
- activeElement: B
- } = document;
- I.div.contains(B) && (I._focusEventsAllowed = !1, setTimeout(() => {
- I.div.contains(document.activeElement) ? I._focusEventsAllowed = !0 : (I.div.addEventListener("focusin", () => {
- I._focusEventsAllowed = !0;
- }, {
- once: !0
- }), B.focus());
- }, 0)), I._structTreeParentId = (ee = a(this, r)) == null ? void 0 : ee.moveElementInDOM(this.div, I.div, I.contentDiv, !0);
- }
- addOrRebuild(I) {
- I.needsToBeRebuilt() ? I.rebuild() : this.add(I);
- }
- addUndoableEditor(I) {
- const B = () => I._uiManager.rebuild(I), ee = () => {
- I.remove();
- };
- this.addCommands({
- cmd: B,
- undo: ee,
- mustExec: !1
- });
- }
- getNextId() {
- return a(this, x).getId();
- }
- pasteEditor(I, B) {
- a(this, x).updateToolbar(I), a(this, x).updateMode(I);
- const {
- offsetX: ee,
- offsetY: Y
- } = K(this, M, sn).call(this), q = this.getNextId(), le = K(this, y, nn).call(this, {
- parent: this,
- id: q,
- x: ee,
- y: Y,
- uiManager: a(this, x),
- isCentered: !0,
- ...B
- });
- le && this.add(le);
- }
- deserialize(I) {
- switch (I.annotationType ?? I.annotationEditorType) {
- case n.AnnotationEditorType.FREETEXT:
- return l.FreeTextEditor.deserialize(I, this, a(this, x));
- case n.AnnotationEditorType.INK:
- return h.InkEditor.deserialize(I, this, a(this, x));
- case n.AnnotationEditorType.STAMP:
- return c.StampEditor.deserialize(I, this, a(this, x));
- }
- return null;
- }
- addNewEditor() {
- K(this, E, Rt).call(this, K(this, M, sn).call(this), !0);
- }
- setSelected(I) {
- a(this, x).setSelected(I);
- }
- toggleSelected(I) {
- a(this, x).toggleSelected(I);
- }
- isSelected(I) {
- return a(this, x).isSelected(I);
- }
- unselect(I) {
- a(this, x).unselect(I);
- }
- pointerup(I) {
- const {
- isMac: B
- } = n.FeatureTest.platform;
- if (!(I.button !== 0 || I.ctrlKey && B) && I.target === this.div && a(this, b)) {
- if (oe(this, b, !1), !a(this, T)) {
- oe(this, T, !0);
- return;
- }
- if (a(this, x).getMode() === n.AnnotationEditorType.STAMP) {
- a(this, x).unselectAll();
- return;
- }
- K(this, E, Rt).call(this, I, !1);
- }
- }
- pointerdown(I) {
- if (a(this, b)) {
- oe(this, b, !1);
- return;
- }
- const {
- isMac: B
- } = n.FeatureTest.platform;
- if (I.button !== 0 || I.ctrlKey && B || I.target !== this.div)
- return;
- oe(this, b, !0);
- const ee = a(this, x).getActive();
- oe(this, T, !ee || ee.isEmpty());
- }
- findNewParent(I, B, ee) {
- const Y = a(this, x).findParent(B, ee);
- return Y === null || Y === this ? !1 : (Y.changeParent(I), !0);
- }
- destroy() {
- var I, B;
- ((I = a(this, x).getActive()) == null ? void 0 : I.parent) === this && (a(this, x).commitOrRemove(), a(this, x).setActiveEditor(null));
- for (const ee of a(this, P).values())
- (B = a(this, r)) == null || B.removePointerInTextLayer(ee.contentDiv), ee.setParent(null), ee.isAttachedToDOM = !1, ee.div.remove();
- this.div = null, a(this, P).clear(), a(this, x).removeLayer(this);
- }
- render({
- viewport: I
- }) {
- this.viewport = I, (0, _.setLayerDimensions)(this.div, I);
- for (const B of a(this, x).getEditors(this.pageIndex))
- this.add(B);
- this.updateMode();
- }
- update({
- viewport: I
- }) {
- a(this, x).commitOrRemove(), this.viewport = I, (0, _.setLayerDimensions)(this.div, {
- rotation: I.rotation
- }), this.updateMode();
- }
- get pageDimensions() {
- const {
- pageWidth: I,
- pageHeight: B
- } = this.viewport.rawDims;
- return [I, B];
- }
- };
- r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakMap(), F = new WeakMap(), x = new WeakMap(), y = new WeakSet(), nn = function(I) {
- switch (a(this, x).getMode()) {
- case n.AnnotationEditorType.FREETEXT:
- return new l.FreeTextEditor(I);
- case n.AnnotationEditorType.INK:
- return new h.InkEditor(I);
- case n.AnnotationEditorType.STAMP:
- return new c.StampEditor(I);
- }
- return null;
- }, E = new WeakSet(), Rt = function(I, B) {
- const ee = this.getNextId(), Y = K(this, y, nn).call(this, {
- parent: this,
- id: ee,
- x: I.offsetX,
- y: I.offsetY,
- uiManager: a(this, x),
- isCentered: B
- });
- return Y && this.add(Y), Y;
- }, M = new WeakSet(), sn = function() {
- const {
- x: I,
- y: B,
- width: ee,
- height: Y
- } = this.div.getBoundingClientRect(), q = Math.max(0, I), le = Math.max(0, B), pe = Math.min(window.innerWidth, I + ee), we = Math.min(window.innerHeight, B + Y), be = (q + pe) / 2 - I, R = (le + we) / 2 - B, [d, g] = this.viewport.rotation % 180 === 0 ? [be, R] : [R, be];
- return {
- offsetX: d,
- offsetY: g
- };
- }, N = new WeakSet(), rn = function() {
- oe(this, k, !0);
- for (const I of a(this, P).values())
- I.isEmpty() && I.remove();
- oe(this, k, !1);
- }, nt(X, "_initialized", !1);
- let o = X;
- e.AnnotationEditorLayer = o;
- },
- /* 28 */
- /***/
- (t, e, i) => {
- var c, o, r, T, S, w, C, P, b, k, In, x, Ln, p, Dn, $, At, m, an, D, On, G, on;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.FreeTextEditor = void 0;
- var n = i(1), s = i(5), l = i(4), h = i(29);
- const B = class B extends l.AnnotationEditor {
- constructor(q) {
- super({
- ...q,
- name: "freeTextEditor"
- });
- W(this, k);
- W(this, x);
- W(this, p);
- W(this, $);
- W(this, m);
- W(this, D);
- W(this, G);
- W(this, c, this.editorDivBlur.bind(this));
- W(this, o, this.editorDivFocus.bind(this));
- W(this, r, this.editorDivInput.bind(this));
- W(this, T, this.editorDivKeydown.bind(this));
- W(this, S, void 0);
- W(this, w, "");
- W(this, C, `${this.id}-editor`);
- W(this, P, void 0);
- W(this, b, null);
- oe(this, S, q.color || B._defaultColor || l.AnnotationEditor._defaultLineColor), oe(this, P, q.fontSize || B._defaultFontSize);
- }
- static get _keyboardManager() {
- const q = B.prototype, le = (be) => be.isEmpty(), pe = s.AnnotationEditorUIManager.TRANSLATE_SMALL, we = s.AnnotationEditorUIManager.TRANSLATE_BIG;
- return (0, n.shadow)(this, "_keyboardManager", new s.KeyboardManager([[["ctrl+s", "mac+meta+s", "ctrl+p", "mac+meta+p"], q.commitOrRemove, {
- bubbles: !0
- }], [["ctrl+Enter", "mac+meta+Enter", "Escape", "mac+Escape"], q.commitOrRemove], [["ArrowLeft", "mac+ArrowLeft"], q._translateEmpty, {
- args: [-pe, 0],
- checker: le
- }], [["ctrl+ArrowLeft", "mac+shift+ArrowLeft"], q._translateEmpty, {
- args: [-we, 0],
- checker: le
- }], [["ArrowRight", "mac+ArrowRight"], q._translateEmpty, {
- args: [pe, 0],
- checker: le
- }], [["ctrl+ArrowRight", "mac+shift+ArrowRight"], q._translateEmpty, {
- args: [we, 0],
- checker: le
- }], [["ArrowUp", "mac+ArrowUp"], q._translateEmpty, {
- args: [0, -pe],
- checker: le
- }], [["ctrl+ArrowUp", "mac+shift+ArrowUp"], q._translateEmpty, {
- args: [0, -we],
- checker: le
- }], [["ArrowDown", "mac+ArrowDown"], q._translateEmpty, {
- args: [0, pe],
- checker: le
- }], [["ctrl+ArrowDown", "mac+shift+ArrowDown"], q._translateEmpty, {
- args: [0, we],
- checker: le
- }]]));
- }
- static initialize(q) {
- l.AnnotationEditor.initialize(q, {
- strings: ["free_text2_default_content", "editor_free_text2_aria_label"]
- });
- const le = getComputedStyle(document.documentElement);
- this._internalPadding = parseFloat(le.getPropertyValue("--freetext-padding"));
- }
- static updateDefaultParams(q, le) {
- switch (q) {
- case n.AnnotationEditorParamsType.FREETEXT_SIZE:
- B._defaultFontSize = le;
- break;
- case n.AnnotationEditorParamsType.FREETEXT_COLOR:
- B._defaultColor = le;
- break;
- }
- }
- updateParams(q, le) {
- switch (q) {
- case n.AnnotationEditorParamsType.FREETEXT_SIZE:
- K(this, k, In).call(this, le);
- break;
- case n.AnnotationEditorParamsType.FREETEXT_COLOR:
- K(this, x, Ln).call(this, le);
- break;
- }
- }
- static get defaultPropertiesToUpdate() {
- return [[n.AnnotationEditorParamsType.FREETEXT_SIZE, B._defaultFontSize], [n.AnnotationEditorParamsType.FREETEXT_COLOR, B._defaultColor || l.AnnotationEditor._defaultLineColor]];
- }
- get propertiesToUpdate() {
- return [[n.AnnotationEditorParamsType.FREETEXT_SIZE, a(this, P)], [n.AnnotationEditorParamsType.FREETEXT_COLOR, a(this, S)]];
- }
- _translateEmpty(q, le) {
- this._uiManager.translateSelectedEditors(q, le, !0);
- }
- getInitialTranslation() {
- const q = this.parentScale;
- return [-B._internalPadding * q, -(B._internalPadding + a(this, P)) * q];
- }
- rebuild() {
- this.parent && (super.rebuild(), this.div !== null && (this.isAttachedToDOM || this.parent.add(this)));
- }
- enableEditMode() {
- this.isInEditMode() || (this.parent.setEditingState(!1), this.parent.updateToolbar(n.AnnotationEditorType.FREETEXT), super.enableEditMode(), this.overlayDiv.classList.remove("enabled"), this.editorDiv.contentEditable = !0, this._isDraggable = !1, this.div.removeAttribute("aria-activedescendant"), this.editorDiv.addEventListener("keydown", a(this, T)), this.editorDiv.addEventListener("focus", a(this, o)), this.editorDiv.addEventListener("blur", a(this, c)), this.editorDiv.addEventListener("input", a(this, r)));
- }
- disableEditMode() {
- this.isInEditMode() && (this.parent.setEditingState(!0), super.disableEditMode(), this.overlayDiv.classList.add("enabled"), this.editorDiv.contentEditable = !1, this.div.setAttribute("aria-activedescendant", a(this, C)), this._isDraggable = !0, this.editorDiv.removeEventListener("keydown", a(this, T)), this.editorDiv.removeEventListener("focus", a(this, o)), this.editorDiv.removeEventListener("blur", a(this, c)), this.editorDiv.removeEventListener("input", a(this, r)), this.div.focus({
- preventScroll: !0
- }), this.isEditing = !1, this.parent.div.classList.add("freeTextEditing"));
- }
- focusin(q) {
- this._focusEventsAllowed && (super.focusin(q), q.target !== this.editorDiv && this.editorDiv.focus());
- }
- onceAdded() {
- var q;
- if (this.width) {
- K(this, G, on).call(this);
- return;
- }
- this.enableEditMode(), this.editorDiv.focus(), (q = this._initialOptions) != null && q.isCentered && this.center(), this._initialOptions = null;
- }
- isEmpty() {
- return !this.editorDiv || this.editorDiv.innerText.trim() === "";
- }
- remove() {
- this.isEditing = !1, this.parent && (this.parent.setEditingState(!0), this.parent.div.classList.add("freeTextEditing")), super.remove();
- }
- commit() {
- if (!this.isInEditMode())
- return;
- super.commit(), this.disableEditMode();
- const q = a(this, w), le = oe(this, w, K(this, p, Dn).call(this).trimEnd());
- if (q === le)
- return;
- const pe = (we) => {
- if (oe(this, w, we), !we) {
- this.remove();
- return;
- }
- K(this, m, an).call(this), this._uiManager.rebuild(this), K(this, $, At).call(this);
- };
- this.addCommands({
- cmd: () => {
- pe(le);
- },
- undo: () => {
- pe(q);
- },
- mustExec: !1
- }), K(this, $, At).call(this);
- }
- shouldGetKeyboardEvents() {
- return this.isInEditMode();
- }
- enterInEditMode() {
- this.enableEditMode(), this.editorDiv.focus();
- }
- dblclick(q) {
- this.enterInEditMode();
- }
- keydown(q) {
- q.target === this.div && q.key === "Enter" && (this.enterInEditMode(), q.preventDefault());
- }
- editorDivKeydown(q) {
- B._keyboardManager.exec(this, q);
- }
- editorDivFocus(q) {
- this.isEditing = !0;
- }
- editorDivBlur(q) {
- this.isEditing = !1;
- }
- editorDivInput(q) {
- this.parent.div.classList.toggle("freeTextEditing", this.isEmpty());
- }
- disableEditing() {
- this.editorDiv.setAttribute("role", "comment"), this.editorDiv.removeAttribute("aria-multiline");
- }
- enableEditing() {
- this.editorDiv.setAttribute("role", "textbox"), this.editorDiv.setAttribute("aria-multiline", !0);
- }
- render() {
- if (this.div)
- return this.div;
- let q, le;
- this.width && (q = this.x, le = this.y), super.render(), this.editorDiv = document.createElement("div"), this.editorDiv.className = "internal", this.editorDiv.setAttribute("id", a(this, C)), this.enableEditing(), l.AnnotationEditor._l10nPromise.get("editor_free_text2_aria_label").then((we) => {
- var be;
- return (be = this.editorDiv) == null ? void 0 : be.setAttribute("aria-label", we);
- }), l.AnnotationEditor._l10nPromise.get("free_text2_default_content").then((we) => {
- var be;
- return (be = this.editorDiv) == null ? void 0 : be.setAttribute("default-content", we);
- }), this.editorDiv.contentEditable = !0;
- const {
- style: pe
- } = this.editorDiv;
- if (pe.fontSize = `calc(${a(this, P)}px * var(--scale-factor))`, pe.color = a(this, S), this.div.append(this.editorDiv), this.overlayDiv = document.createElement("div"), this.overlayDiv.classList.add("overlay", "enabled"), this.div.append(this.overlayDiv), (0, s.bindEvents)(this, this.div, ["dblclick", "keydown"]), this.width) {
- const [we, be] = this.parentDimensions;
- if (this.annotationElementId) {
- const {
- position: R
- } = a(this, b);
- let [d, g] = this.getInitialTranslation();
- [d, g] = this.pageTranslationToScreen(d, g);
- const [f, v] = this.pageDimensions, [A, O] = this.pageTranslation;
- let H, z;
- switch (this.rotation) {
- case 0:
- H = q + (R[0] - A) / f, z = le + this.height - (R[1] - O) / v;
- break;
- case 90:
- H = q + (R[0] - A) / f, z = le - (R[1] - O) / v, [d, g] = [g, -d];
- break;
- case 180:
- H = q - this.width + (R[0] - A) / f, z = le - (R[1] - O) / v, [d, g] = [-d, -g];
- break;
- case 270:
- H = q + (R[0] - A - this.height * v) / f, z = le + (R[1] - O - this.width * f) / v, [d, g] = [-g, d];
- break;
- }
- this.setAt(H * we, z * be, d, g);
- } else
- this.setAt(q * we, le * be, this.width * we, this.height * be);
- K(this, m, an).call(this), this._isDraggable = !0, this.editorDiv.contentEditable = !1;
- } else
- this._isDraggable = !1, this.editorDiv.contentEditable = !0;
- return this.div;
- }
- get contentDiv() {
- return this.editorDiv;
- }
- static deserialize(q, le, pe) {
- let we = null;
- if (q instanceof h.FreeTextAnnotationElement) {
- const {
- data: {
- defaultAppearanceData: {
- fontSize: R,
- fontColor: d
- },
- rect: g,
- rotation: f,
- id: v
- },
- textContent: A,
- textPosition: O,
- parent: {
- page: {
- pageNumber: H
- }
- }
- } = q;
- if (!A || A.length === 0)
- return null;
- we = q = {
- annotationType: n.AnnotationEditorType.FREETEXT,
- color: Array.from(d),
- fontSize: R,
- value: A.join(`
-`),
- position: O,
- pageIndex: H - 1,
- rect: g,
- rotation: f,
- id: v,
- deleted: !1
- };
- }
- const be = super.deserialize(q, le, pe);
- return oe(be, P, q.fontSize), oe(be, S, n.Util.makeHexColor(...q.color)), oe(be, w, q.value), be.annotationElementId = q.id || null, oe(be, b, we), be;
- }
- serialize(q = !1) {
- if (this.isEmpty())
- return null;
- if (this.deleted)
- return {
- pageIndex: this.pageIndex,
- id: this.annotationElementId,
- deleted: !0
- };
- const le = B._internalPadding * this.parentScale, pe = this.getRect(le, le), we = l.AnnotationEditor._colorManager.convert(this.isAttachedToDOM ? getComputedStyle(this.editorDiv).color : a(this, S)), be = {
- annotationType: n.AnnotationEditorType.FREETEXT,
- color: we,
- fontSize: a(this, P),
- value: a(this, w),
- pageIndex: this.pageIndex,
- rect: pe,
- rotation: this.rotation,
- structTreeParentId: this._structTreeParentId
- };
- return q ? be : this.annotationElementId && !K(this, D, On).call(this, be) ? null : (be.id = this.annotationElementId, be);
- }
- };
- c = new WeakMap(), o = new WeakMap(), r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakSet(), In = function(q) {
- const le = (we) => {
- this.editorDiv.style.fontSize = `calc(${we}px * var(--scale-factor))`, this.translate(0, -(we - a(this, P)) * this.parentScale), oe(this, P, we), K(this, $, At).call(this);
- }, pe = a(this, P);
- this.addCommands({
- cmd: () => {
- le(q);
- },
- undo: () => {
- le(pe);
- },
- mustExec: !0,
- type: n.AnnotationEditorParamsType.FREETEXT_SIZE,
- overwriteIfSameType: !0,
- keepUndo: !0
- });
- }, x = new WeakSet(), Ln = function(q) {
- const le = a(this, S);
- this.addCommands({
- cmd: () => {
- oe(this, S, this.editorDiv.style.color = q);
- },
- undo: () => {
- oe(this, S, this.editorDiv.style.color = le);
- },
- mustExec: !0,
- type: n.AnnotationEditorParamsType.FREETEXT_COLOR,
- overwriteIfSameType: !0,
- keepUndo: !0
- });
- }, p = new WeakSet(), Dn = function() {
- const q = this.editorDiv.getElementsByTagName("div");
- if (q.length === 0)
- return this.editorDiv.innerText;
- const le = [];
- for (const pe of q)
- le.push(pe.innerText.replace(/\r\n?|\n/, ""));
- return le.join(`
-`);
- }, $ = new WeakSet(), At = function() {
- const [q, le] = this.parentDimensions;
- let pe;
- if (this.isAttachedToDOM)
- pe = this.div.getBoundingClientRect();
- else {
- const {
- currentLayer: we,
- div: be
- } = this, R = be.style.display;
- be.style.display = "hidden", we.div.append(this.div), pe = be.getBoundingClientRect(), be.remove(), be.style.display = R;
- }
- this.rotation % 180 === this.parentRotation % 180 ? (this.width = pe.width / q, this.height = pe.height / le) : (this.width = pe.height / q, this.height = pe.width / le), this.fixAndSetPosition();
- }, m = new WeakSet(), an = function() {
- if (this.editorDiv.replaceChildren(), !!a(this, w))
- for (const q of a(this, w).split(`
-`)) {
- const le = document.createElement("div");
- le.append(q ? document.createTextNode(q) : document.createElement("br")), this.editorDiv.append(le);
- }
- }, D = new WeakSet(), On = function(q) {
- const {
- value: le,
- fontSize: pe,
- color: we,
- rect: be,
- pageIndex: R
- } = a(this, b);
- return q.value !== le || q.fontSize !== pe || q.rect.some((d, g) => Math.abs(d - be[g]) >= 1) || q.color.some((d, g) => d !== we[g]) || q.pageIndex !== R;
- }, G = new WeakSet(), on = function(q = !1) {
- if (!this.annotationElementId)
- return;
- if (K(this, $, At).call(this), !q && (this.width === 0 || this.height === 0)) {
- setTimeout(() => K(this, G, on).call(this, !0), 0);
- return;
- }
- const le = B._internalPadding * this.parentScale;
- a(this, b).rect = this.getRect(le, le);
- }, nt(B, "_freeTextDefaultContent", ""), nt(B, "_internalPadding", 0), nt(B, "_defaultColor", null), nt(B, "_defaultFontSize", 10), nt(B, "_type", "freetext");
- let _ = B;
- e.FreeTextEditor = _;
- },
- /* 29 */
- /***/
- (t, e, i) => {
- var g, v, ht, O, Nn, z, ae, Q, ce, ue, me, fe, Pe, Fe, Ee, De, _e, ie, se, ge, Ce, xe, Ue, $n, je, Mt, Xe, ln, Ye, cn, ne, J, ve, Se, tt, et, te, hn, Ne, ke, $e, Be, Bn, Ae, un;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.StampAnnotationElement = e.InkAnnotationElement = e.FreeTextAnnotationElement = e.AnnotationLayer = void 0;
- var n = i(1), s = i(6), l = i(3), h = i(30), _ = i(31), c = i(32);
- const o = 1e3, r = 9, T = /* @__PURE__ */ new WeakSet();
- function S(Oe) {
- return {
- width: Oe[2] - Oe[0],
- height: Oe[3] - Oe[1]
- };
- }
- class w {
- static create(U) {
- switch (U.data.annotationType) {
- case n.AnnotationType.LINK:
- return new P(U);
- case n.AnnotationType.TEXT:
- return new b(U);
- case n.AnnotationType.WIDGET:
- switch (U.data.fieldType) {
- case "Tx":
- return new F(U);
- case "Btn":
- return U.data.radioButton ? new p(U) : U.data.checkBox ? new y(U) : new E(U);
- case "Ch":
- return new $(U);
- case "Sig":
- return new x(U);
- }
- return new k(U);
- case n.AnnotationType.POPUP:
- return new M(U);
- case n.AnnotationType.FREETEXT:
- return new N(U);
- case n.AnnotationType.LINE:
- return new D(U);
- case n.AnnotationType.SQUARE:
- return new X(U);
- case n.AnnotationType.CIRCLE:
- return new G(U);
- case n.AnnotationType.POLYLINE:
- return new I(U);
- case n.AnnotationType.CARET:
- return new ee(U);
- case n.AnnotationType.INK:
- return new Y(U);
- case n.AnnotationType.POLYGON:
- return new B(U);
- case n.AnnotationType.HIGHLIGHT:
- return new q(U);
- case n.AnnotationType.UNDERLINE:
- return new le(U);
- case n.AnnotationType.SQUIGGLY:
- return new pe(U);
- case n.AnnotationType.STRIKEOUT:
- return new we(U);
- case n.AnnotationType.STAMP:
- return new be(U);
- case n.AnnotationType.FILEATTACHMENT:
- return new R(U);
- default:
- return new C(U);
- }
- }
- }
- const f = class f {
- constructor(U, {
- isRenderable: u = !1,
- ignoreBorder: L = !1,
- createQuadrilaterals: j = !1
- } = {}) {
- W(this, g, !1);
- this.isRenderable = u, this.data = U.data, this.layer = U.layer, this.linkService = U.linkService, this.downloadManager = U.downloadManager, this.imageResourcesPath = U.imageResourcesPath, this.renderForms = U.renderForms, this.svgFactory = U.svgFactory, this.annotationStorage = U.annotationStorage, this.enableScripting = U.enableScripting, this.hasJSActions = U.hasJSActions, this._fieldObjects = U.fieldObjects, this.parent = U.parent, u && (this.container = this._createContainer(L)), j && this._createQuadrilaterals();
- }
- static _hasPopupData({
- titleObj: U,
- contentsObj: u,
- richText: L
- }) {
- return !!(U != null && U.str || u != null && u.str || L != null && L.str);
- }
- get hasPopupData() {
- return f._hasPopupData(this.data);
- }
- _createContainer(U) {
- const {
- data: u,
- parent: {
- page: L,
- viewport: j
- }
- } = this, V = document.createElement("section");
- V.setAttribute("data-annotation-id", u.id), this instanceof k || (V.tabIndex = o), V.style.zIndex = this.parent.zIndex++, this.data.popupRef && V.setAttribute("aria-haspopup", "dialog"), u.noRotate && V.classList.add("norotate");
- const {
- pageWidth: Z,
- pageHeight: he,
- pageX: ye,
- pageY: Me
- } = j.rawDims;
- if (!u.rect || this instanceof M) {
- const {
- rotation: He
- } = u;
- return !u.hasOwnCanvas && He !== 0 && this.setRotation(He, V), V;
- }
- const {
- width: Re,
- height: qe
- } = S(u.rect), Ie = n.Util.normalizeRect([u.rect[0], L.view[3] - u.rect[1] + L.view[1], u.rect[2], L.view[3] - u.rect[3] + L.view[1]]);
- if (!U && u.borderStyle.width > 0) {
- V.style.borderWidth = `${u.borderStyle.width}px`;
- const He = u.borderStyle.horizontalCornerRadius, Ve = u.borderStyle.verticalCornerRadius;
- if (He > 0 || Ve > 0) {
- const Ze = `calc(${He}px * var(--scale-factor)) / calc(${Ve}px * var(--scale-factor))`;
- V.style.borderRadius = Ze;
- } else if (this instanceof p) {
- const Ze = `calc(${Re}px * var(--scale-factor)) / calc(${qe}px * var(--scale-factor))`;
- V.style.borderRadius = Ze;
- }
- switch (u.borderStyle.style) {
- case n.AnnotationBorderStyleType.SOLID:
- V.style.borderStyle = "solid";
- break;
- case n.AnnotationBorderStyleType.DASHED:
- V.style.borderStyle = "dashed";
- break;
- case n.AnnotationBorderStyleType.BEVELED:
- (0, n.warn)("Unimplemented border style: beveled");
- break;
- case n.AnnotationBorderStyleType.INSET:
- (0, n.warn)("Unimplemented border style: inset");
- break;
- case n.AnnotationBorderStyleType.UNDERLINE:
- V.style.borderBottomStyle = "solid";
- break;
- }
- const Je = u.borderColor || null;
- Je ? (oe(this, g, !0), V.style.borderColor = n.Util.makeHexColor(Je[0] | 0, Je[1] | 0, Je[2] | 0)) : V.style.borderWidth = 0;
- }
- V.style.left = `${100 * (Ie[0] - ye) / Z}%`, V.style.top = `${100 * (Ie[1] - Me) / he}%`;
- const {
- rotation: Le
- } = u;
- return u.hasOwnCanvas || Le === 0 ? (V.style.width = `${100 * Re / Z}%`, V.style.height = `${100 * qe / he}%`) : this.setRotation(Le, V), V;
- }
- setRotation(U, u = this.container) {
- if (!this.data.rect)
- return;
- const {
- pageWidth: L,
- pageHeight: j
- } = this.parent.viewport.rawDims, {
- width: V,
- height: Z
- } = S(this.data.rect);
- let he, ye;
- U % 180 === 0 ? (he = 100 * V / L, ye = 100 * Z / j) : (he = 100 * Z / L, ye = 100 * V / j), u.style.width = `${he}%`, u.style.height = `${ye}%`, u.setAttribute("data-main-rotation", (360 - U) % 360);
- }
- get _commonActions() {
- const U = (u, L, j) => {
- const V = j.detail[u], Z = V[0], he = V.slice(1);
- j.target.style[L] = h.ColorConverters[`${Z}_HTML`](he), this.annotationStorage.setValue(this.data.id, {
- [L]: h.ColorConverters[`${Z}_rgb`](he)
- });
- };
- return (0, n.shadow)(this, "_commonActions", {
- display: (u) => {
- const {
- display: L
- } = u.detail, j = L % 2 === 1;
- this.container.style.visibility = j ? "hidden" : "visible", this.annotationStorage.setValue(this.data.id, {
- noView: j,
- noPrint: L === 1 || L === 2
- });
- },
- print: (u) => {
- this.annotationStorage.setValue(this.data.id, {
- noPrint: !u.detail.print
- });
- },
- hidden: (u) => {
- const {
- hidden: L
- } = u.detail;
- this.container.style.visibility = L ? "hidden" : "visible", this.annotationStorage.setValue(this.data.id, {
- noPrint: L,
- noView: L
- });
- },
- focus: (u) => {
- setTimeout(() => u.target.focus({
- preventScroll: !1
- }), 0);
- },
- userName: (u) => {
- u.target.title = u.detail.userName;
- },
- readonly: (u) => {
- u.target.disabled = u.detail.readonly;
- },
- required: (u) => {
- this._setRequired(u.target, u.detail.required);
- },
- bgColor: (u) => {
- U("bgColor", "backgroundColor", u);
- },
- fillColor: (u) => {
- U("fillColor", "backgroundColor", u);
- },
- fgColor: (u) => {
- U("fgColor", "color", u);
- },
- textColor: (u) => {
- U("textColor", "color", u);
- },
- borderColor: (u) => {
- U("borderColor", "borderColor", u);
- },
- strokeColor: (u) => {
- U("strokeColor", "borderColor", u);
- },
- rotation: (u) => {
- const L = u.detail.rotation;
- this.setRotation(L), this.annotationStorage.setValue(this.data.id, {
- rotation: L
- });
- }
- });
- }
- _dispatchEventFromSandbox(U, u) {
- const L = this._commonActions;
- for (const j of Object.keys(u.detail)) {
- const V = U[j] || L[j];
- V == null || V(u);
- }
- }
- _setDefaultPropertiesFromJS(U) {
- if (!this.enableScripting)
- return;
- const u = this.annotationStorage.getRawValue(this.data.id);
- if (!u)
- return;
- const L = this._commonActions;
- for (const [j, V] of Object.entries(u)) {
- const Z = L[j];
- if (Z) {
- const he = {
- detail: {
- [j]: V
- },
- target: U
- };
- Z(he), delete u[j];
- }
- }
- }
- _createQuadrilaterals() {
- if (!this.container)
- return;
- const {
- quadPoints: U
- } = this.data;
- if (!U)
- return;
- const [u, L, j, V] = this.data.rect;
- if (U.length === 1) {
- const [, {
- x: Ve,
- y: Je
- }, {
- x: Ze,
- y: st
- }] = U[0];
- if (j === Ve && V === Je && u === Ze && L === st)
- return;
- }
- const {
- style: Z
- } = this.container;
- let he;
- if (a(this, g)) {
- const {
- borderColor: Ve,
- borderWidth: Je
- } = Z;
- Z.borderWidth = 0, he = ["url('data:image/svg+xml;utf8,", '')"), Z.backgroundImage = he.join("")), this.container.append(qe), this.container.style.clipPath = `url(#${He})`;
- }
- _createPopup() {
- const {
- container: U,
- data: u
- } = this;
- U.setAttribute("aria-haspopup", "dialog");
- const L = new M({
- data: {
- color: u.color,
- titleObj: u.titleObj,
- modificationDate: u.modificationDate,
- contentsObj: u.contentsObj,
- richText: u.richText,
- parentRect: u.rect,
- borderStyle: 0,
- id: `popup_${u.id}`,
- rotation: u.rotation
- },
- parent: this.parent,
- elements: [this]
- });
- this.parent.div.append(L.render());
- }
- render() {
- (0, n.unreachable)("Abstract method `AnnotationElement.render` called");
- }
- _getElementsByName(U, u = null) {
- const L = [];
- if (this._fieldObjects) {
- const j = this._fieldObjects[U];
- if (j)
- for (const {
- page: V,
- id: Z,
- exportValues: he
- } of j) {
- if (V === -1 || Z === u)
- continue;
- const ye = typeof he == "string" ? he : null, Me = document.querySelector(`[data-element-id="${Z}"]`);
- if (Me && !T.has(Me)) {
- (0, n.warn)(`_getElementsByName - element not allowed: ${Z}`);
- continue;
- }
- L.push({
- id: Z,
- exportValue: ye,
- domElement: Me
- });
- }
- return L;
- }
- for (const j of document.getElementsByName(U)) {
- const {
- exportValue: V
- } = j, Z = j.getAttribute("data-element-id");
- Z !== u && T.has(j) && L.push({
- id: Z,
- exportValue: V,
- domElement: j
- });
- }
- return L;
- }
- show() {
- var U;
- this.container && (this.container.hidden = !1), (U = this.popup) == null || U.maybeShow();
- }
- hide() {
- var U;
- this.container && (this.container.hidden = !0), (U = this.popup) == null || U.forceHide();
- }
- getElementsToTriggerPopup() {
- return this.container;
- }
- addHighlightArea() {
- const U = this.getElementsToTriggerPopup();
- if (Array.isArray(U))
- for (const u of U)
- u.classList.add("highlightArea");
- else
- U.classList.add("highlightArea");
- }
- _editOnDoubleClick() {
- const {
- annotationEditorType: U,
- data: {
- id: u
- }
- } = this;
- this.container.addEventListener("dblclick", () => {
- var L;
- (L = this.linkService.eventBus) == null || L.dispatch("switchannotationeditormode", {
- source: this,
- mode: U,
- editId: u
- });
- });
- }
- };
- g = new WeakMap();
- let C = f;
- class P extends C {
- constructor(u, L = null) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !!(L != null && L.ignoreBorder),
- createQuadrilaterals: !0
- });
- W(this, v);
- W(this, O);
- this.isTooltipOnly = u.data.isTooltipOnly;
- }
- render() {
- const {
- data: u,
- linkService: L
- } = this, j = document.createElement("a");
- j.setAttribute("data-element-id", u.id);
- let V = !1;
- return u.url ? (L.addLinkAttributes(j, u.url, u.newWindow), V = !0) : u.action ? (this._bindNamedAction(j, u.action), V = !0) : u.attachment ? (this._bindAttachment(j, u.attachment), V = !0) : u.setOCGState ? (K(this, O, Nn).call(this, j, u.setOCGState), V = !0) : u.dest ? (this._bindLink(j, u.dest), V = !0) : (u.actions && (u.actions.Action || u.actions["Mouse Up"] || u.actions["Mouse Down"]) && this.enableScripting && this.hasJSActions && (this._bindJSAction(j, u), V = !0), u.resetForm ? (this._bindResetFormAction(j, u.resetForm), V = !0) : this.isTooltipOnly && !V && (this._bindLink(j, ""), V = !0)), this.container.classList.add("linkAnnotation"), V && this.container.append(j), this.container;
- }
- _bindLink(u, L) {
- u.href = this.linkService.getDestinationHash(L), u.onclick = () => (L && this.linkService.goToDestination(L), !1), (L || L === "") && K(this, v, ht).call(this);
- }
- _bindNamedAction(u, L) {
- u.href = this.linkService.getAnchorUrl(""), u.onclick = () => (this.linkService.executeNamedAction(L), !1), K(this, v, ht).call(this);
- }
- _bindAttachment(u, L) {
- u.href = this.linkService.getAnchorUrl(""), u.onclick = () => {
- var j;
- return (j = this.downloadManager) == null || j.openOrDownloadData(this.container, L.content, L.filename), !1;
- }, K(this, v, ht).call(this);
- }
- _bindJSAction(u, L) {
- u.href = this.linkService.getAnchorUrl("");
- const j = /* @__PURE__ */ new Map([["Action", "onclick"], ["Mouse Up", "onmouseup"], ["Mouse Down", "onmousedown"]]);
- for (const V of Object.keys(L.actions)) {
- const Z = j.get(V);
- Z && (u[Z] = () => {
- var he;
- return (he = this.linkService.eventBus) == null || he.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: L.id,
- name: V
- }
- }), !1;
- });
- }
- u.onclick || (u.onclick = () => !1), K(this, v, ht).call(this);
- }
- _bindResetFormAction(u, L) {
- const j = u.onclick;
- if (j || (u.href = this.linkService.getAnchorUrl("")), K(this, v, ht).call(this), !this._fieldObjects) {
- (0, n.warn)('_bindResetFormAction - "resetForm" action not supported, ensure that the `fieldObjects` parameter is provided.'), j || (u.onclick = () => !1);
- return;
- }
- u.onclick = () => {
- var qe;
- j == null || j();
- const {
- fields: V,
- refs: Z,
- include: he
- } = L, ye = [];
- if (V.length !== 0 || Z.length !== 0) {
- const Ie = new Set(Z);
- for (const Le of V) {
- const He = this._fieldObjects[Le] || [];
- for (const {
- id: Ve
- } of He)
- Ie.add(Ve);
- }
- for (const Le of Object.values(this._fieldObjects))
- for (const He of Le)
- Ie.has(He.id) === he && ye.push(He);
- } else
- for (const Ie of Object.values(this._fieldObjects))
- ye.push(...Ie);
- const Me = this.annotationStorage, Re = [];
- for (const Ie of ye) {
- const {
- id: Le
- } = Ie;
- switch (Re.push(Le), Ie.type) {
- case "text": {
- const Ve = Ie.defaultValue || "";
- Me.setValue(Le, {
- value: Ve
- });
- break;
- }
- case "checkbox":
- case "radiobutton": {
- const Ve = Ie.defaultValue === Ie.exportValues;
- Me.setValue(Le, {
- value: Ve
- });
- break;
- }
- case "combobox":
- case "listbox": {
- const Ve = Ie.defaultValue || "";
- Me.setValue(Le, {
- value: Ve
- });
- break;
- }
- default:
- continue;
- }
- const He = document.querySelector(`[data-element-id="${Le}"]`);
- if (He) {
- if (!T.has(He)) {
- (0, n.warn)(`_bindResetFormAction - element not allowed: ${Le}`);
- continue;
- }
- } else
- continue;
- He.dispatchEvent(new Event("resetform"));
- }
- return this.enableScripting && ((qe = this.linkService.eventBus) == null || qe.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: "app",
- ids: Re,
- name: "ResetForm"
- }
- })), !1;
- };
- }
- }
- v = new WeakSet(), ht = function() {
- this.container.setAttribute("data-internal-link", "");
- }, O = new WeakSet(), Nn = function(u, L) {
- u.href = this.linkService.getAnchorUrl(""), u.onclick = () => (this.linkService.executeSetOCGState(L), !1), K(this, v, ht).call(this);
- };
- class b extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0
- });
- }
- render() {
- this.container.classList.add("textAnnotation");
- const U = document.createElement("img");
- return U.src = this.imageResourcesPath + "annotation-" + this.data.name.toLowerCase() + ".svg", U.alt = "[{{type}} Annotation]", U.dataset.l10nId = "text_annotation_type", U.dataset.l10nArgs = JSON.stringify({
- type: this.data.name
- }), !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container.append(U), this.container;
- }
- }
- class k extends C {
- render() {
- return this.data.alternativeText && (this.container.title = this.data.alternativeText), this.container;
- }
- showElementAndHideCanvas(U) {
- var u;
- this.data.hasOwnCanvas && (((u = U.previousSibling) == null ? void 0 : u.nodeName) === "CANVAS" && (U.previousSibling.hidden = !0), U.hidden = !1);
- }
- _getKeyModifier(U) {
- const {
- isWin: u,
- isMac: L
- } = n.FeatureTest.platform;
- return u && U.ctrlKey || L && U.metaKey;
- }
- _setEventListener(U, u, L, j, V) {
- L.includes("mouse") ? U.addEventListener(L, (Z) => {
- var he;
- (he = this.linkService.eventBus) == null || he.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: this.data.id,
- name: j,
- value: V(Z),
- shift: Z.shiftKey,
- modifier: this._getKeyModifier(Z)
- }
- });
- }) : U.addEventListener(L, (Z) => {
- var he;
- if (L === "blur") {
- if (!u.focused || !Z.relatedTarget)
- return;
- u.focused = !1;
- } else if (L === "focus") {
- if (u.focused)
- return;
- u.focused = !0;
- }
- V && ((he = this.linkService.eventBus) == null || he.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: this.data.id,
- name: j,
- value: V(Z)
- }
- }));
- });
- }
- _setEventListeners(U, u, L, j) {
- var V, Z, he;
- for (const [ye, Me] of L)
- (Me === "Action" || (V = this.data.actions) != null && V[Me]) && ((Me === "Focus" || Me === "Blur") && (u || (u = {
- focused: !1
- })), this._setEventListener(U, u, ye, Me, j), Me === "Focus" && !((Z = this.data.actions) != null && Z.Blur) ? this._setEventListener(U, u, "blur", "Blur", null) : Me === "Blur" && !((he = this.data.actions) != null && he.Focus) && this._setEventListener(U, u, "focus", "Focus", null));
- }
- _setBackgroundColor(U) {
- const u = this.data.backgroundColor || null;
- U.style.backgroundColor = u === null ? "transparent" : n.Util.makeHexColor(u[0], u[1], u[2]);
- }
- _setTextStyle(U) {
- const u = ["left", "center", "right"], {
- fontColor: L
- } = this.data.defaultAppearanceData, j = this.data.defaultAppearanceData.fontSize || r, V = U.style;
- let Z;
- const he = 2, ye = (Me) => Math.round(10 * Me) / 10;
- if (this.data.multiLine) {
- const Me = Math.abs(this.data.rect[3] - this.data.rect[1] - he), Re = Math.round(Me / (n.LINE_FACTOR * j)) || 1, qe = Me / Re;
- Z = Math.min(j, ye(qe / n.LINE_FACTOR));
- } else {
- const Me = Math.abs(this.data.rect[3] - this.data.rect[1] - he);
- Z = Math.min(j, ye(Me / n.LINE_FACTOR));
- }
- V.fontSize = `calc(${Z}px * var(--scale-factor))`, V.color = n.Util.makeHexColor(L[0], L[1], L[2]), this.data.textAlignment !== null && (V.textAlign = u[this.data.textAlignment]);
- }
- _setRequired(U, u) {
- u ? U.setAttribute("required", !0) : U.removeAttribute("required"), U.setAttribute("aria-required", u);
- }
- }
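- // Text widget (form field) element: renders an <input type="text"> or
- // <textarea>, mirrors edits into the annotation storage, and forwards
- // Keystroke/Focus/Blur events to the scripting sandbox when enabled.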
- class F extends k {
- constructor(U) {
- const u = U.renderForms || !U.data.hasAppearance && !!U.data.fieldValue;
- super(U, {
- isRenderable: u
- });
- }
- setPropertyOnSiblings(U, u, L, j) {
- const V = this.annotationStorage;
- for (const Z of this._getElementsByName(U.name, U.id))
- Z.domElement && (Z.domElement[u] = L), V.setValue(Z.id, {
- [j]: L
- });
- }
- render() {
- var j, V;
- const U = this.annotationStorage, u = this.data.id;
- this.container.classList.add("textWidgetAnnotation");
- let L = null;
- if (this.renderForms) {
- const Z = U.getValue(u, {
- value: this.data.fieldValue
- });
- let he = Z.value || "";
- const ye = U.getValue(u, {
- charLimit: this.data.maxLen
- }).charLimit;
- ye && he.length > ye && (he = he.slice(0, ye));
- let Me = Z.formattedValue || ((j = this.data.textContent) == null ? void 0 : j.join(`
-`)) || null;
- Me && this.data.comb && (Me = Me.replaceAll(/\s+/g, ""));
- const Re = {
- userValue: he,
- formattedValue: Me,
- lastCommittedValue: null,
- commitKey: 1,
- focused: !1
- };
- this.data.multiLine ? (L = document.createElement("textarea"), L.textContent = Me ?? he, this.data.doNotScroll && (L.style.overflowY = "hidden")) : (L = document.createElement("input"), L.type = "text", L.setAttribute("value", Me ?? he), this.data.doNotScroll && (L.style.overflowX = "hidden")), this.data.hasOwnCanvas && (L.hidden = !0), T.add(L), L.setAttribute("data-element-id", u), L.disabled = this.data.readOnly, L.name = this.data.fieldName, L.tabIndex = o, this._setRequired(L, this.data.required), ye && (L.maxLength = ye), L.addEventListener("input", (Ie) => {
- U.setValue(u, {
- value: Ie.target.value
- }), this.setPropertyOnSiblings(L, "value", Ie.target.value, "value"), Re.formattedValue = null;
- }), L.addEventListener("resetform", (Ie) => {
- const Le = this.data.defaultFieldValue ?? "";
- L.value = Re.userValue = Le, Re.formattedValue = null;
- });
- let qe = (Ie) => {
- const {
- formattedValue: Le
- } = Re;
- Le != null && (Ie.target.value = Le), Ie.target.scrollLeft = 0;
- };
- if (this.enableScripting && this.hasJSActions) {
- L.addEventListener("focus", (Le) => {
- if (Re.focused)
- return;
- const {
- target: He
- } = Le;
- Re.userValue && (He.value = Re.userValue), Re.lastCommittedValue = He.value, Re.commitKey = 1, Re.focused = !0;
- }), L.addEventListener("updatefromsandbox", (Le) => {
- this.showElementAndHideCanvas(Le.target);
- const He = {
- value(Ve) {
- Re.userValue = Ve.detail.value ?? "", U.setValue(u, {
- value: Re.userValue.toString()
- }), Ve.target.value = Re.userValue;
- },
- formattedValue(Ve) {
- const {
- formattedValue: Je
- } = Ve.detail;
- Re.formattedValue = Je, Je != null && Ve.target !== document.activeElement && (Ve.target.value = Je), U.setValue(u, {
- formattedValue: Je
- });
- },
- selRange(Ve) {
- Ve.target.setSelectionRange(...Ve.detail.selRange);
- },
- charLimit: (Ve) => {
- var it;
- const {
- charLimit: Je
- } = Ve.detail, {
- target: Ze
- } = Ve;
- if (Je === 0) {
- Ze.removeAttribute("maxLength");
- return;
- }
- Ze.setAttribute("maxLength", Je);
- let st = Re.userValue;
- !st || st.length <= Je || (st = st.slice(0, Je), Ze.value = Re.userValue = st, U.setValue(u, {
- value: st
- }), (it = this.linkService.eventBus) == null || it.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: u,
- name: "Keystroke",
- value: st,
- willCommit: !0,
- commitKey: 1,
- selStart: Ze.selectionStart,
- selEnd: Ze.selectionEnd
- }
- }));
- }
- };
- this._dispatchEventFromSandbox(He, Le);
- }), L.addEventListener("keydown", (Le) => {
- var Je;
- Re.commitKey = 1;
- let He = -1;
- if (Le.key === "Escape" ? He = 0 : Le.key === "Enter" && !this.data.multiLine ? He = 2 : Le.key === "Tab" && (Re.commitKey = 3), He === -1)
- return;
- const {
- value: Ve
- } = Le.target;
- Re.lastCommittedValue !== Ve && (Re.lastCommittedValue = Ve, Re.userValue = Ve, (Je = this.linkService.eventBus) == null || Je.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: u,
- name: "Keystroke",
- value: Ve,
- willCommit: !0,
- commitKey: He,
- selStart: Le.target.selectionStart,
- selEnd: Le.target.selectionEnd
- }
- }));
- });
- const Ie = qe;
- qe = null, L.addEventListener("blur", (Le) => {
- var Ve;
- if (!Re.focused || !Le.relatedTarget)
- return;
- Re.focused = !1;
- const {
- value: He
- } = Le.target;
- Re.userValue = He, Re.lastCommittedValue !== He && ((Ve = this.linkService.eventBus) == null || Ve.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: u,
- name: "Keystroke",
- value: He,
- willCommit: !0,
- commitKey: Re.commitKey,
- selStart: Le.target.selectionStart,
- selEnd: Le.target.selectionEnd
- }
- })), Ie(Le);
- }), (V = this.data.actions) != null && V.Keystroke && L.addEventListener("beforeinput", (Le) => {
- var lt;
- Re.lastCommittedValue = null;
- const {
- data: He,
- target: Ve
- } = Le, {
- value: Je,
- selectionStart: Ze,
- selectionEnd: st
- } = Ve;
- let it = Ze, rt = st;
- switch (Le.inputType) {
- case "deleteWordBackward": {
- const ct = Je.substring(0, Ze).match(/\w*[^\w]*$/);
- ct && (it -= ct[0].length);
- break;
- }
- case "deleteWordForward": {
- const ct = Je.substring(Ze).match(/^[^\w]*\w*/);
- ct && (rt += ct[0].length);
- break;
- }
- case "deleteContentBackward":
- Ze === st && (it -= 1);
- break;
- case "deleteContentForward":
- Ze === st && (rt += 1);
- break;
- }
- Le.preventDefault(), (lt = this.linkService.eventBus) == null || lt.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: u,
- name: "Keystroke",
- value: Je,
- change: He || "",
- willCommit: !1,
- selStart: it,
- selEnd: rt
- }
- });
- }), this._setEventListeners(L, Re, [["focus", "Focus"], ["blur", "Blur"], ["mousedown", "Mouse Down"], ["mouseenter", "Mouse Enter"], ["mouseleave", "Mouse Exit"], ["mouseup", "Mouse Up"]], (Le) => Le.target.value);
- }
- if (qe && L.addEventListener("blur", qe), this.data.comb) {
- const Le = (this.data.rect[2] - this.data.rect[0]) / ye;
- L.classList.add("comb"), L.style.letterSpacing = `calc(${Le}px * var(--scale-factor) - 1ch)`;
- }
- } else
- L = document.createElement("div"), L.textContent = this.data.fieldValue, L.style.verticalAlign = "middle", L.style.display = "table-cell";
- return this._setTextStyle(L), this._setBackgroundColor(L), this._setDefaultPropertiesFromJS(L), this.container.append(L), this.container;
- }
- }
- class x extends k {
- constructor(U) {
- super(U, {
- isRenderable: !!U.data.hasOwnCanvas
- });
- }
- }
- class y extends k {
- constructor(U) {
- super(U, {
- isRenderable: U.renderForms
- });
- }
- render() {
- const U = this.annotationStorage, u = this.data, L = u.id;
- let j = U.getValue(L, {
- value: u.exportValue === u.fieldValue
- }).value;
- typeof j == "string" && (j = j !== "Off", U.setValue(L, {
- value: j
- })), this.container.classList.add("buttonWidgetAnnotation", "checkBox");
- const V = document.createElement("input");
- return T.add(V), V.setAttribute("data-element-id", L), V.disabled = u.readOnly, this._setRequired(V, this.data.required), V.type = "checkbox", V.name = u.fieldName, j && V.setAttribute("checked", !0), V.setAttribute("exportValue", u.exportValue), V.tabIndex = o, V.addEventListener("change", (Z) => {
- const {
- name: he,
- checked: ye
- } = Z.target;
- for (const Me of this._getElementsByName(he, L)) {
- const Re = ye && Me.exportValue === u.exportValue;
- Me.domElement && (Me.domElement.checked = Re), U.setValue(Me.id, {
- value: Re
- });
- }
- U.setValue(L, {
- value: ye
- });
- }), V.addEventListener("resetform", (Z) => {
- const he = u.defaultFieldValue || "Off";
- Z.target.checked = he === u.exportValue;
- }), this.enableScripting && this.hasJSActions && (V.addEventListener("updatefromsandbox", (Z) => {
- const he = {
- value(ye) {
- ye.target.checked = ye.detail.value !== "Off", U.setValue(L, {
- value: ye.target.checked
- });
- }
- };
- this._dispatchEventFromSandbox(he, Z);
- }), this._setEventListeners(V, null, [["change", "Validate"], ["change", "Action"], ["focus", "Focus"], ["blur", "Blur"], ["mousedown", "Mouse Down"], ["mouseenter", "Mouse Enter"], ["mouseleave", "Mouse Exit"], ["mouseup", "Mouse Up"]], (Z) => Z.target.checked)), this._setBackgroundColor(V), this._setDefaultPropertiesFromJS(V), this.container.append(V), this.container;
- }
- }
- class p extends k {
- constructor(U) {
- super(U, {
- isRenderable: U.renderForms
- });
- }
- render() {
- this.container.classList.add("buttonWidgetAnnotation", "radioButton");
- const U = this.annotationStorage, u = this.data, L = u.id;
- let j = U.getValue(L, {
- value: u.fieldValue === u.buttonValue
- }).value;
- typeof j == "string" && (j = j !== u.buttonValue, U.setValue(L, {
- value: j
- }));
- const V = document.createElement("input");
- if (T.add(V), V.setAttribute("data-element-id", L), V.disabled = u.readOnly, this._setRequired(V, this.data.required), V.type = "radio", V.name = u.fieldName, j && V.setAttribute("checked", !0), V.tabIndex = o, V.addEventListener("change", (Z) => {
- const {
- name: he,
- checked: ye
- } = Z.target;
- for (const Me of this._getElementsByName(he, L))
- U.setValue(Me.id, {
- value: !1
- });
- U.setValue(L, {
- value: ye
- });
- }), V.addEventListener("resetform", (Z) => {
- const he = u.defaultFieldValue;
- Z.target.checked = he != null && he === u.buttonValue;
- }), this.enableScripting && this.hasJSActions) {
- const Z = u.buttonValue;
- V.addEventListener("updatefromsandbox", (he) => {
- const ye = {
- value: (Me) => {
- const Re = Z === Me.detail.value;
- for (const qe of this._getElementsByName(Me.target.name)) {
- const Ie = Re && qe.id === L;
- qe.domElement && (qe.domElement.checked = Ie), U.setValue(qe.id, {
- value: Ie
- });
- }
- }
- };
- this._dispatchEventFromSandbox(ye, he);
- }), this._setEventListeners(V, null, [["change", "Validate"], ["change", "Action"], ["focus", "Focus"], ["blur", "Blur"], ["mousedown", "Mouse Down"], ["mouseenter", "Mouse Enter"], ["mouseleave", "Mouse Exit"], ["mouseup", "Mouse Up"]], (he) => he.target.checked);
- }
- return this._setBackgroundColor(V), this._setDefaultPropertiesFromJS(V), this.container.append(V), this.container;
- }
- }
- class E extends P {
- constructor(U) {
- super(U, {
- ignoreBorder: U.data.hasAppearance
- });
- }
- render() {
- const U = super.render();
- U.classList.add("buttonWidgetAnnotation", "pushButton"), this.data.alternativeText && (U.title = this.data.alternativeText);
- const u = U.lastChild;
- return this.enableScripting && this.hasJSActions && u && (this._setDefaultPropertiesFromJS(u), u.addEventListener("updatefromsandbox", (L) => {
- this._dispatchEventFromSandbox({}, L);
- })), U;
- }
- }
- class $ extends k {
- constructor(U) {
- super(U, {
- isRenderable: U.renderForms
- });
- }
- render() {
- this.container.classList.add("choiceWidgetAnnotation");
- const U = this.annotationStorage, u = this.data.id, L = U.getValue(u, {
- value: this.data.fieldValue
- }), j = document.createElement("select");
- T.add(j), j.setAttribute("data-element-id", u), j.disabled = this.data.readOnly, this._setRequired(j, this.data.required), j.name = this.data.fieldName, j.tabIndex = o;
- let V = this.data.combo && this.data.options.length > 0;
- this.data.combo || (j.size = this.data.options.length, this.data.multiSelect && (j.multiple = !0)), j.addEventListener("resetform", (Re) => {
- const qe = this.data.defaultFieldValue;
- for (const Ie of j.options)
- Ie.selected = Ie.value === qe;
- });
- for (const Re of this.data.options) {
- const qe = document.createElement("option");
- qe.textContent = Re.displayValue, qe.value = Re.exportValue, L.value.includes(Re.exportValue) && (qe.setAttribute("selected", !0), V = !1), j.append(qe);
- }
- let Z = null;
- if (V) {
- const Re = document.createElement("option");
- Re.value = " ", Re.setAttribute("hidden", !0), Re.setAttribute("selected", !0), j.prepend(Re), Z = () => {
- Re.remove(), j.removeEventListener("input", Z), Z = null;
- }, j.addEventListener("input", Z);
- }
- const he = (Re) => {
- const qe = Re ? "value" : "textContent", {
- options: Ie,
- multiple: Le
- } = j;
- return Le ? Array.prototype.filter.call(Ie, (He) => He.selected).map((He) => He[qe]) : Ie.selectedIndex === -1 ? null : Ie[Ie.selectedIndex][qe];
- };
- let ye = he(!1);
- const Me = (Re) => {
- const qe = Re.target.options;
- return Array.prototype.map.call(qe, (Ie) => ({
- displayValue: Ie.textContent,
- exportValue: Ie.value
- }));
- };
- return this.enableScripting && this.hasJSActions ? (j.addEventListener("updatefromsandbox", (Re) => {
- const qe = {
- value(Ie) {
- Z == null || Z();
- const Le = Ie.detail.value, He = new Set(Array.isArray(Le) ? Le : [Le]);
- for (const Ve of j.options)
- Ve.selected = He.has(Ve.value);
- U.setValue(u, {
- value: he(!0)
- }), ye = he(!1);
- },
- multipleSelection(Ie) {
- j.multiple = !0;
- },
- remove(Ie) {
- const Le = j.options, He = Ie.detail.remove;
- Le[He].selected = !1, j.remove(He), Le.length > 0 && Array.prototype.findIndex.call(Le, (Je) => Je.selected) === -1 && (Le[0].selected = !0), U.setValue(u, {
- value: he(!0),
- items: Me(Ie)
- }), ye = he(!1);
- },
- clear(Ie) {
- for (; j.length !== 0; )
- j.remove(0);
- U.setValue(u, {
- value: null,
- items: []
- }), ye = he(!1);
- },
- insert(Ie) {
- const {
- index: Le,
- displayValue: He,
- exportValue: Ve
- } = Ie.detail.insert, Je = j.children[Le], Ze = document.createElement("option");
- Ze.textContent = He, Ze.value = Ve, Je ? Je.before(Ze) : j.append(Ze), U.setValue(u, {
- value: he(!0),
- items: Me(Ie)
- }), ye = he(!1);
- },
- items(Ie) {
- const {
- items: Le
- } = Ie.detail;
- for (; j.length !== 0; )
- j.remove(0);
- for (const He of Le) {
- const {
- displayValue: Ve,
- exportValue: Je
- } = He, Ze = document.createElement("option");
- Ze.textContent = Ve, Ze.value = Je, j.append(Ze);
- }
- j.options.length > 0 && (j.options[0].selected = !0), U.setValue(u, {
- value: he(!0),
- items: Me(Ie)
- }), ye = he(!1);
- },
- indices(Ie) {
- const Le = new Set(Ie.detail.indices);
- for (const He of Ie.target.options)
- He.selected = Le.has(He.index);
- U.setValue(u, {
- value: he(!0)
- }), ye = he(!1);
- },
- editable(Ie) {
- Ie.target.disabled = !Ie.detail.editable;
- }
- };
- this._dispatchEventFromSandbox(qe, Re);
- }), j.addEventListener("input", (Re) => {
- var Ie;
- const qe = he(!0);
- U.setValue(u, {
- value: qe
- }), Re.preventDefault(), (Ie = this.linkService.eventBus) == null || Ie.dispatch("dispatcheventinsandbox", {
- source: this,
- detail: {
- id: u,
- name: "Keystroke",
- value: ye,
- changeEx: qe,
- willCommit: !1,
- commitKey: 1,
- keyDown: !1
- }
- });
- }), this._setEventListeners(j, null, [["focus", "Focus"], ["blur", "Blur"], ["mousedown", "Mouse Down"], ["mouseenter", "Mouse Enter"], ["mouseleave", "Mouse Exit"], ["mouseup", "Mouse Up"], ["input", "Action"], ["input", "Validate"]], (Re) => Re.target.value)) : j.addEventListener("input", function(Re) {
- U.setValue(u, {
- value: he(!0)
- });
- }), this.data.combo && this._setTextStyle(j), this._setBackgroundColor(j), this._setDefaultPropertiesFromJS(j), this.container.append(j), this.container;
- }
- }
- class M extends C {
- constructor(U) {
- const {
- data: u,
- elements: L
- } = U;
- super(U, {
- isRenderable: C._hasPopupData(u)
- }), this.elements = L;
- }
- render() {
- this.container.classList.add("popupAnnotation");
- const U = new m({
- container: this.container,
- color: this.data.color,
- titleObj: this.data.titleObj,
- modificationDate: this.data.modificationDate,
- contentsObj: this.data.contentsObj,
- richText: this.data.richText,
- rect: this.data.rect,
- parentRect: this.data.parentRect || null,
- parent: this.parent,
- elements: this.elements,
- open: this.data.open
- }), u = [];
- for (const L of this.elements)
- L.popup = U, u.push(L.data.id), L.addHighlightArea();
- return this.container.setAttribute("aria-controls", u.map((L) => `${n.AnnotationPrefix}${L}`).join(",")), this.container;
- }
- }
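- // Popup helper shared by markup annotations: lazily builds the popup DOM
- // (title, date, contents or rich text) and toggles it on click/hover/keyboard.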
- class m {
- constructor({
- container: U,
- color: u,
- elements: L,
- titleObj: j,
- modificationDate: V,
- contentsObj: Z,
- richText: he,
- parent: ye,
- rect: Me,
- parentRect: Re,
- open: qe
- }) {
- W(this, Ue);
- W(this, je);
- W(this, Xe);
- W(this, Ye);
- W(this, z, null);
- W(this, ae, K(this, Ue, $n).bind(this));
- W(this, Q, K(this, Ye, cn).bind(this));
- W(this, ce, K(this, Xe, ln).bind(this));
- W(this, ue, K(this, je, Mt).bind(this));
- W(this, me, null);
- W(this, fe, null);
- W(this, Pe, null);
- W(this, Fe, null);
- W(this, Ee, null);
- W(this, De, null);
- W(this, _e, !1);
- W(this, ie, null);
- W(this, se, null);
- W(this, ge, null);
- W(this, Ce, null);
- W(this, xe, !1);
- var Le;
- oe(this, fe, U), oe(this, Ce, j), oe(this, Pe, Z), oe(this, ge, he), oe(this, Ee, ye), oe(this, me, u), oe(this, se, Me), oe(this, De, Re), oe(this, Fe, L);
- const Ie = s.PDFDateString.toDateObject(V);
- Ie && oe(this, z, ye.l10n.get("annotation_date_string", {
- date: Ie.toLocaleDateString(),
- time: Ie.toLocaleTimeString()
- })), this.trigger = L.flatMap((He) => He.getElementsToTriggerPopup());
- for (const He of this.trigger)
- He.addEventListener("click", a(this, ue)), He.addEventListener("mouseenter", a(this, ce)), He.addEventListener("mouseleave", a(this, Q)), He.classList.add("popupTriggerArea");
- for (const He of L)
- (Le = He.container) == null || Le.addEventListener("keydown", a(this, ae));
- a(this, fe).hidden = !0, qe && K(this, je, Mt).call(this);
- }
- render() {
- if (a(this, ie))
- return;
- const {
- page: {
- view: U
- },
- viewport: {
- rawDims: {
- pageWidth: u,
- pageHeight: L,
- pageX: j,
- pageY: V
- }
- }
- } = a(this, Ee), Z = oe(this, ie, document.createElement("div"));
- if (Z.className = "popup", a(this, me)) {
- const it = Z.style.outlineColor = n.Util.makeHexColor(...a(this, me));
- CSS.supports("background-color", "color-mix(in srgb, red 30%, white)") ? Z.style.backgroundColor = `color-mix(in srgb, ${it} 30%, white)` : Z.style.backgroundColor = n.Util.makeHexColor(...a(this, me).map((lt) => Math.floor(0.7 * (255 - lt) + lt)));
- }
- const he = document.createElement("span");
- he.className = "header";
- const ye = document.createElement("h1");
- if (he.append(ye), {
- dir: ye.dir,
- str: ye.textContent
- } = a(this, Ce), Z.append(he), a(this, z)) {
- const it = document.createElement("span");
- it.classList.add("popupDate"), a(this, z).then((rt) => {
- it.textContent = rt;
- }), he.append(it);
- }
- const Me = a(this, Pe), Re = a(this, ge);
- if (Re != null && Re.str && (!(Me != null && Me.str) || Me.str === Re.str))
- c.XfaLayer.render({
- xfaHtml: Re.html,
- intent: "richText",
- div: Z
- }), Z.lastChild.classList.add("richText", "popupContent");
- else {
- const it = this._formatContents(Me);
- Z.append(it);
- }
- let qe = !!a(this, De), Ie = qe ? a(this, De) : a(this, se);
- for (const it of a(this, Fe))
- if (!Ie || n.Util.intersect(it.data.rect, Ie) !== null) {
- Ie = it.data.rect, qe = !0;
- break;
- }
- const Le = n.Util.normalizeRect([Ie[0], U[3] - Ie[1] + U[1], Ie[2], U[3] - Ie[3] + U[1]]), He = 5, Ve = qe ? Ie[2] - Ie[0] + He : 0, Je = Le[0] + Ve, Ze = Le[1], {
- style: st
- } = a(this, fe);
- st.left = `${100 * (Je - j) / u}%`, st.top = `${100 * (Ze - V) / L}%`, a(this, fe).append(Z);
- }
- _formatContents({
- str: U,
- dir: u
- }) {
- const L = document.createElement("p");
- L.classList.add("popupContent"), L.dir = u;
- const j = U.split(/(?:\r\n?|\n)/);
- for (let V = 0, Z = j.length; V < Z; ++V) {
- const he = j[V];
- L.append(document.createTextNode(he)), V < Z - 1 && L.append(document.createElement("br"));
- }
- return L;
- }
- forceHide() {
- oe(this, xe, this.isVisible), a(this, xe) && (a(this, fe).hidden = !0);
- }
- maybeShow() {
- a(this, xe) && (oe(this, xe, !1), a(this, fe).hidden = !1);
- }
- get isVisible() {
- return a(this, fe).hidden === !1;
- }
- }
- z = new WeakMap(), ae = new WeakMap(), Q = new WeakMap(), ce = new WeakMap(), ue = new WeakMap(), me = new WeakMap(), fe = new WeakMap(), Pe = new WeakMap(), Fe = new WeakMap(), Ee = new WeakMap(), De = new WeakMap(), _e = new WeakMap(), ie = new WeakMap(), se = new WeakMap(), ge = new WeakMap(), Ce = new WeakMap(), xe = new WeakMap(), Ue = new WeakSet(), $n = function(U) {
- U.altKey || U.shiftKey || U.ctrlKey || U.metaKey || (U.key === "Enter" || U.key === "Escape" && a(this, _e)) && K(this, je, Mt).call(this);
- }, je = new WeakSet(), Mt = function() {
- oe(this, _e, !a(this, _e)), a(this, _e) ? (K(this, Xe, ln).call(this), a(this, fe).addEventListener("click", a(this, ue)), a(this, fe).addEventListener("keydown", a(this, ae))) : (K(this, Ye, cn).call(this), a(this, fe).removeEventListener("click", a(this, ue)), a(this, fe).removeEventListener("keydown", a(this, ae)));
- }, Xe = new WeakSet(), ln = function() {
- a(this, ie) || this.render(), this.isVisible ? a(this, _e) && a(this, fe).classList.add("focused") : (a(this, fe).hidden = !1, a(this, fe).style.zIndex = parseInt(a(this, fe).style.zIndex) + 1e3);
- }, Ye = new WeakSet(), cn = function() {
- a(this, fe).classList.remove("focused"), !(a(this, _e) || !this.isVisible) && (a(this, fe).hidden = !0, a(this, fe).style.zIndex = parseInt(a(this, fe).style.zIndex) - 1e3);
- };
- class N extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0
- }), this.textContent = U.data.textContent, this.textPosition = U.data.textPosition, this.annotationEditorType = n.AnnotationEditorType.FREETEXT;
- }
- render() {
- if (this.container.classList.add("freeTextAnnotation"), this.textContent) {
- const U = document.createElement("div");
- U.classList.add("annotationTextContent"), U.setAttribute("role", "comment");
- for (const u of this.textContent) {
- const L = document.createElement("span");
- L.textContent = u, U.append(L);
- }
- this.container.append(U);
- }
- return !this.data.popupRef && this.hasPopupData && this._createPopup(), this._editOnDoubleClick(), this.container;
- }
- }
- e.FreeTextAnnotationElement = N;
- class D extends C {
- constructor(u) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- W(this, ne, null);
- }
- render() {
- this.container.classList.add("lineAnnotation");
- const u = this.data, {
- width: L,
- height: j
- } = S(u.rect), V = this.svgFactory.create(L, j, !0), Z = oe(this, ne, this.svgFactory.createElement("svg:line"));
- return Z.setAttribute("x1", u.rect[2] - u.lineCoordinates[0]), Z.setAttribute("y1", u.rect[3] - u.lineCoordinates[1]), Z.setAttribute("x2", u.rect[2] - u.lineCoordinates[2]), Z.setAttribute("y2", u.rect[3] - u.lineCoordinates[3]), Z.setAttribute("stroke-width", u.borderStyle.width || 1), Z.setAttribute("stroke", "transparent"), Z.setAttribute("fill", "transparent"), V.append(Z), this.container.append(V), !u.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- getElementsToTriggerPopup() {
- return a(this, ne);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- ne = new WeakMap();
- class X extends C {
- constructor(u) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- W(this, J, null);
- }
- render() {
- this.container.classList.add("squareAnnotation");
- const u = this.data, {
- width: L,
- height: j
- } = S(u.rect), V = this.svgFactory.create(L, j, !0), Z = u.borderStyle.width, he = oe(this, J, this.svgFactory.createElement("svg:rect"));
- return he.setAttribute("x", Z / 2), he.setAttribute("y", Z / 2), he.setAttribute("width", L - Z), he.setAttribute("height", j - Z), he.setAttribute("stroke-width", Z || 1), he.setAttribute("stroke", "transparent"), he.setAttribute("fill", "transparent"), V.append(he), this.container.append(V), !u.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- getElementsToTriggerPopup() {
- return a(this, J);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- J = new WeakMap();
- class G extends C {
- constructor(u) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- W(this, ve, null);
- }
- render() {
- this.container.classList.add("circleAnnotation");
- const u = this.data, {
- width: L,
- height: j
- } = S(u.rect), V = this.svgFactory.create(L, j, !0), Z = u.borderStyle.width, he = oe(this, ve, this.svgFactory.createElement("svg:ellipse"));
- return he.setAttribute("cx", L / 2), he.setAttribute("cy", j / 2), he.setAttribute("rx", L / 2 - Z / 2), he.setAttribute("ry", j / 2 - Z / 2), he.setAttribute("stroke-width", Z || 1), he.setAttribute("stroke", "transparent"), he.setAttribute("fill", "transparent"), V.append(he), this.container.append(V), !u.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- getElementsToTriggerPopup() {
- return a(this, ve);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- ve = new WeakMap();
- class I extends C {
- constructor(u) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- W(this, Se, null);
- this.containerClassName = "polylineAnnotation", this.svgElementName = "svg:polyline";
- }
- render() {
- this.container.classList.add(this.containerClassName);
- const u = this.data, {
- width: L,
- height: j
- } = S(u.rect), V = this.svgFactory.create(L, j, !0);
- let Z = [];
- for (const ye of u.vertices) {
- const Me = ye.x - u.rect[0], Re = u.rect[3] - ye.y;
- Z.push(Me + "," + Re);
- }
- Z = Z.join(" ");
- const he = oe(this, Se, this.svgFactory.createElement(this.svgElementName));
- return he.setAttribute("points", Z), he.setAttribute("stroke-width", u.borderStyle.width || 1), he.setAttribute("stroke", "transparent"), he.setAttribute("fill", "transparent"), V.append(he), this.container.append(V), !u.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- getElementsToTriggerPopup() {
- return a(this, Se);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- Se = new WeakMap();
- class B extends I {
- constructor(U) {
- super(U), this.containerClassName = "polygonAnnotation", this.svgElementName = "svg:polygon";
- }
- }
- class ee extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- }
- render() {
- return this.container.classList.add("caretAnnotation"), !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- }
- class Y extends C {
- constructor(u) {
- super(u, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- W(this, tt, []);
- this.containerClassName = "inkAnnotation", this.svgElementName = "svg:polyline", this.annotationEditorType = n.AnnotationEditorType.INK;
- }
- render() {
- this.container.classList.add(this.containerClassName);
- const u = this.data, {
- width: L,
- height: j
- } = S(u.rect), V = this.svgFactory.create(L, j, !0);
- for (const Z of u.inkLists) {
- let he = [];
- for (const Me of Z) {
- const Re = Me.x - u.rect[0], qe = u.rect[3] - Me.y;
- he.push(`${Re},${qe}`);
- }
- he = he.join(" ");
- const ye = this.svgFactory.createElement(this.svgElementName);
- a(this, tt).push(ye), ye.setAttribute("points", he), ye.setAttribute("stroke-width", u.borderStyle.width || 1), ye.setAttribute("stroke", "transparent"), ye.setAttribute("fill", "transparent"), !u.popupRef && this.hasPopupData && this._createPopup(), V.append(ye);
- }
- return this.container.append(V), this.container;
- }
- getElementsToTriggerPopup() {
- return a(this, tt);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- tt = new WeakMap(), e.InkAnnotationElement = Y;
- class q extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0,
- createQuadrilaterals: !0
- });
- }
- render() {
- return !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container.classList.add("highlightAnnotation"), this.container;
- }
- }
- class le extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0,
- createQuadrilaterals: !0
- });
- }
- render() {
- return !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container.classList.add("underlineAnnotation"), this.container;
- }
- }
- class pe extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0,
- createQuadrilaterals: !0
- });
- }
- render() {
- return !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container.classList.add("squigglyAnnotation"), this.container;
- }
- }
- class we extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0,
- createQuadrilaterals: !0
- });
- }
- render() {
- return !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container.classList.add("strikeoutAnnotation"), this.container;
- }
- }
- class be extends C {
- constructor(U) {
- super(U, {
- isRenderable: !0,
- ignoreBorder: !0
- });
- }
- render() {
- return this.container.classList.add("stampAnnotation"), !this.data.popupRef && this.hasPopupData && this._createPopup(), this.container;
- }
- }
- e.StampAnnotationElement = be;
- class R extends C {
- constructor(u) {
- var V;
- super(u, {
- isRenderable: !0
- });
- W(this, te);
- W(this, et, null);
- const {
- filename: L,
- content: j
- } = this.data.file;
- this.filename = (0, s.getFilenameFromUrl)(L, !0), this.content = j, (V = this.linkService.eventBus) == null || V.dispatch("fileattachmentannotation", {
- source: this,
- filename: L,
- content: j
- });
- }
- render() {
- this.container.classList.add("fileAttachmentAnnotation");
- const {
- container: u,
- data: L
- } = this;
- let j;
- L.hasAppearance || L.fillAlpha === 0 ? j = document.createElement("div") : (j = document.createElement("img"), j.src = `${this.imageResourcesPath}annotation-${/paperclip/i.test(L.name) ? "paperclip" : "pushpin"}.svg`, L.fillAlpha && L.fillAlpha < 1 && (j.style = `filter: opacity(${Math.round(L.fillAlpha * 100)}%);`)), j.addEventListener("dblclick", K(this, te, hn).bind(this)), oe(this, et, j);
- const {
- isMac: V
- } = n.FeatureTest.platform;
- return u.addEventListener("keydown", (Z) => {
- Z.key === "Enter" && (V ? Z.metaKey : Z.ctrlKey) && K(this, te, hn).call(this);
- }), !L.popupRef && this.hasPopupData ? this._createPopup() : j.classList.add("popupTriggerArea"), u.append(j), u;
- }
- getElementsToTriggerPopup() {
- return a(this, et);
- }
- addHighlightArea() {
- this.container.classList.add("highlightArea");
- }
- }
- et = new WeakMap(), te = new WeakSet(), hn = function() {
- var u;
- (u = this.downloadManager) == null || u.openOrDownloadData(this.container, this.content, this.filename);
- };
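- // AnnotationLayer: creates a DOM element for every renderable annotation on a
- // page, groups popup annotations with their parent elements, and swaps in
- // per-annotation canvases after rendering.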
- class d {
- constructor({
- div: U,
- accessibilityManager: u,
- annotationCanvasMap: L,
- l10n: j,
- page: V,
- viewport: Z
- }) {
- W(this, Be);
- W(this, Ae);
- W(this, Ne, null);
- W(this, ke, null);
- W(this, $e, /* @__PURE__ */ new Map());
- this.div = U, oe(this, Ne, u), oe(this, ke, L), this.l10n = j, this.page = V, this.viewport = Z, this.zIndex = 0, this.l10n || (this.l10n = _.NullL10n);
- }
- async render(U) {
- const {
- annotations: u
- } = U, L = this.div;
- (0, s.setLayerDimensions)(L, this.viewport);
- const j = /* @__PURE__ */ new Map(), V = {
- data: null,
- layer: L,
- linkService: U.linkService,
- downloadManager: U.downloadManager,
- imageResourcesPath: U.imageResourcesPath || "",
- renderForms: U.renderForms !== !1,
- svgFactory: new s.DOMSVGFactory(),
- annotationStorage: U.annotationStorage || new l.AnnotationStorage(),
- enableScripting: U.enableScripting === !0,
- hasJSActions: U.hasJSActions,
- fieldObjects: U.fieldObjects,
- parent: this,
- elements: null
- };
- for (const Z of u) {
- if (Z.noHTML)
- continue;
- const he = Z.annotationType === n.AnnotationType.POPUP;
- if (he) {
- const Re = j.get(Z.id);
- if (!Re)
- continue;
- V.elements = Re;
- } else {
- const {
- width: Re,
- height: qe
- } = S(Z.rect);
- if (Re <= 0 || qe <= 0)
- continue;
- }
- V.data = Z;
- const ye = w.create(V);
- if (!ye.isRenderable)
- continue;
- if (!he && Z.popupRef) {
- const Re = j.get(Z.popupRef);
- Re ? Re.push(ye) : j.set(Z.popupRef, [ye]);
- }
- ye.annotationEditorType > 0 && a(this, $e).set(ye.data.id, ye);
- const Me = ye.render();
- Z.hidden && (Me.style.visibility = "hidden"), K(this, Be, Bn).call(this, Me, Z.id);
- }
- K(this, Ae, un).call(this), await this.l10n.translate(L);
- }
- update({
- viewport: U
- }) {
- const u = this.div;
- this.viewport = U, (0, s.setLayerDimensions)(u, {
- rotation: U.rotation
- }), K(this, Ae, un).call(this), u.hidden = !1;
- }
- getEditableAnnotations() {
- return Array.from(a(this, $e).values());
- }
- getEditableAnnotation(U) {
- return a(this, $e).get(U);
- }
- }
- Ne = new WeakMap(), ke = new WeakMap(), $e = new WeakMap(), Be = new WeakSet(), Bn = function(U, u) {
- var j;
- const L = U.firstChild || U;
- L.id = `${n.AnnotationPrefix}${u}`, this.div.append(U), (j = a(this, Ne)) == null || j.moveElementInDOM(this.div, U, L, !1);
- }, Ae = new WeakSet(), un = function() {
- if (!a(this, ke))
- return;
- const U = this.div;
- for (const [u, L] of a(this, ke)) {
- const j = U.querySelector(`[data-annotation-id="${u}"]`);
- if (!j)
- continue;
- const {
- firstChild: V
- } = j;
- V ? V.nodeName === "CANVAS" ? V.replaceWith(L) : V.before(L) : j.append(L);
- }
- a(this, ke).clear();
- }, e.AnnotationLayer = d;
- },
- /* 30 */
- /***/
- (t, e) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.ColorConverters = void 0;
- function i(l) {
- return Math.floor(Math.max(0, Math.min(1, l)) * 255).toString(16).padStart(2, "0");
- }
- function n(l) {
- return Math.max(0, Math.min(255, 255 * l));
- }
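- // ColorConverters: conversions between Gray/RGB/CMYK component arrays and HTML
- // hex strings, e.g. RGB_HTML([1, 0, 0]) yields "#ff0000".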
- class s {
- static CMYK_G([h, _, c, o]) {
- return ["G", 1 - Math.min(1, 0.3 * h + 0.59 * c + 0.11 * _ + o)];
- }
- static G_CMYK([h]) {
- return ["CMYK", 0, 0, 0, 1 - h];
- }
- static G_RGB([h]) {
- return ["RGB", h, h, h];
- }
- static G_rgb([h]) {
- return h = n(h), [h, h, h];
- }
- static G_HTML([h]) {
- const _ = i(h);
- return `#${_}${_}${_}`;
- }
- static RGB_G([h, _, c]) {
- return ["G", 0.3 * h + 0.59 * _ + 0.11 * c];
- }
- static RGB_rgb(h) {
- return h.map(n);
- }
- static RGB_HTML(h) {
- return `#${h.map(i).join("")}`;
- }
- static T_HTML() {
- return "#00000000";
- }
- static T_rgb() {
- return [null];
- }
- static CMYK_RGB([h, _, c, o]) {
- return ["RGB", 1 - Math.min(1, h + o), 1 - Math.min(1, c + o), 1 - Math.min(1, _ + o)];
- }
- static CMYK_rgb([h, _, c, o]) {
- return [n(1 - Math.min(1, h + o)), n(1 - Math.min(1, c + o)), n(1 - Math.min(1, _ + o))];
- }
- static CMYK_HTML(h) {
- const _ = this.CMYK_RGB(h).slice(1);
- return this.RGB_HTML(_);
- }
- static RGB_CMYK([h, _, c]) {
- const o = 1 - h, r = 1 - _, T = 1 - c, S = Math.min(o, r, T);
- return ["CMYK", o, r, T, S];
- }
- }
- e.ColorConverters = s;
- },
- /* 31 */
- /***/
- (t, e) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.NullL10n = void 0, e.getL10nFallback = n;
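- // English fallback strings used by NullL10n when no localization bundle is
- // provided; {{placeholder}} tokens are substituted at lookup time.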
- const i = {
- of_pages: "of {{pagesCount}}",
- page_of_pages: "({{pageNumber}} of {{pagesCount}})",
- document_properties_kb: "{{size_kb}} KB ({{size_b}} bytes)",
- document_properties_mb: "{{size_mb}} MB ({{size_b}} bytes)",
- document_properties_date_string: "{{date}}, {{time}}",
- document_properties_page_size_unit_inches: "in",
- document_properties_page_size_unit_millimeters: "mm",
- document_properties_page_size_orientation_portrait: "portrait",
- document_properties_page_size_orientation_landscape: "landscape",
- document_properties_page_size_name_a3: "A3",
- document_properties_page_size_name_a4: "A4",
- document_properties_page_size_name_letter: "Letter",
- document_properties_page_size_name_legal: "Legal",
- document_properties_page_size_dimension_string: "{{width}} × {{height}} {{unit}} ({{orientation}})",
- document_properties_page_size_dimension_name_string: "{{width}} × {{height}} {{unit}} ({{name}}, {{orientation}})",
- document_properties_linearized_yes: "Yes",
- document_properties_linearized_no: "No",
- additional_layers: "Additional Layers",
- page_landmark: "Page {{page}}",
- thumb_page_title: "Page {{page}}",
- thumb_page_canvas: "Thumbnail of Page {{page}}",
- find_reached_top: "Reached top of document, continued from bottom",
- find_reached_bottom: "Reached end of document, continued from top",
- "find_match_count[one]": "{{current}} of {{total}} match",
- "find_match_count[other]": "{{current}} of {{total}} matches",
- "find_match_count_limit[one]": "More than {{limit}} match",
- "find_match_count_limit[other]": "More than {{limit}} matches",
- find_not_found: "Phrase not found",
- page_scale_width: "Page Width",
- page_scale_fit: "Page Fit",
- page_scale_auto: "Automatic Zoom",
- page_scale_actual: "Actual Size",
- page_scale_percent: "{{scale}}%",
- loading_error: "An error occurred while loading the PDF.",
- invalid_file_error: "Invalid or corrupted PDF file.",
- missing_file_error: "Missing PDF file.",
- unexpected_response_error: "Unexpected server response.",
- rendering_error: "An error occurred while rendering the page.",
- annotation_date_string: "{{date}}, {{time}}",
- printing_not_supported: "Warning: Printing is not fully supported by this browser.",
- printing_not_ready: "Warning: The PDF is not fully loaded for printing.",
- web_fonts_disabled: "Web fonts are disabled: unable to use embedded PDF fonts.",
- free_text2_default_content: "Start typing…",
- editor_free_text2_aria_label: "Text Editor",
- editor_ink2_aria_label: "Draw Editor",
- editor_ink_canvas_aria_label: "User-created image",
- editor_alt_text_button_label: "Alt text",
- editor_alt_text_edit_button_label: "Edit alt text",
- editor_alt_text_decorative_tooltip: "Marked as decorative"
- };
- i.print_progress_percent = "{{progress}}%";
- function n(h, _) {
- switch (h) {
- case "find_match_count":
- h = `find_match_count[${_.total === 1 ? "one" : "other"}]`;
- break;
- case "find_match_count_limit":
- h = `find_match_count_limit[${_.limit === 1 ? "one" : "other"}]`;
- break;
- }
- return i[h] || "";
- }
- function s(h, _) {
- return _ ? h.replaceAll(/\{\{\s*(\w+)\s*\}\}/g, (c, o) => o in _ ? _[o] : "{{" + o + "}}") : h;
- }
- const l = {
- async getLanguage() {
- return "en-us";
- },
- async getDirection() {
- return "ltr";
- },
- async get(h, _ = null, c = n(h, _)) {
- return s(c, _);
- },
- async translate(h) {
- }
- };
- e.NullL10n = l;
- },
- /* 32 */
- /***/
- (t, e, i) => {
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.XfaLayer = void 0;
- var n = i(25);
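- // XfaLayer: walks the XFA HTML tree produced by the worker, builds the
- // corresponding DOM, and keeps form values in sync with the annotation storage.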
- class s {
- static setupStorage(h, _, c, o, r) {
- const T = o.getValue(_, {
- value: null
- });
- switch (c.name) {
- case "textarea":
- if (T.value !== null && (h.textContent = T.value), r === "print")
- break;
- h.addEventListener("input", (S) => {
- o.setValue(_, {
- value: S.target.value
- });
- });
- break;
- case "input":
- if (c.attributes.type === "radio" || c.attributes.type === "checkbox") {
- if (T.value === c.attributes.xfaOn ? h.setAttribute("checked", !0) : T.value === c.attributes.xfaOff && h.removeAttribute("checked"), r === "print")
- break;
- h.addEventListener("change", (S) => {
- o.setValue(_, {
- value: S.target.checked ? S.target.getAttribute("xfaOn") : S.target.getAttribute("xfaOff")
- });
- });
- } else {
- if (T.value !== null && h.setAttribute("value", T.value), r === "print")
- break;
- h.addEventListener("input", (S) => {
- o.setValue(_, {
- value: S.target.value
- });
- });
- }
- break;
- case "select":
- if (T.value !== null) {
- h.setAttribute("value", T.value);
- for (const S of c.children)
- S.attributes.value === T.value ? S.attributes.selected = !0 : S.attributes.hasOwnProperty("selected") && delete S.attributes.selected;
- }
- h.addEventListener("input", (S) => {
- const w = S.target.options, C = w.selectedIndex === -1 ? "" : w[w.selectedIndex].value;
- o.setValue(_, {
- value: C
- });
- });
- break;
- }
- }
- static setAttributes({
- html: h,
- element: _,
- storage: c = null,
- intent: o,
- linkService: r
- }) {
- const {
- attributes: T
- } = _, S = h instanceof HTMLAnchorElement;
- T.type === "radio" && (T.name = `${T.name}-${o}`);
- for (const [w, C] of Object.entries(T))
- if (C != null)
- switch (w) {
- case "class":
- C.length && h.setAttribute(w, C.join(" "));
- break;
- case "dataId":
- break;
- case "id":
- h.setAttribute("data-element-id", C);
- break;
- case "style":
- Object.assign(h.style, C);
- break;
- case "textContent":
- h.textContent = C;
- break;
- default:
- (!S || w !== "href" && w !== "newWindow") && h.setAttribute(w, C);
- }
- S && r.addLinkAttributes(h, T.href, T.newWindow), c && T.dataId && this.setupStorage(h, T.dataId, _, c);
- }
- static render(h) {
- var P;
- const _ = h.annotationStorage, c = h.linkService, o = h.xfaHtml, r = h.intent || "display", T = document.createElement(o.name);
- o.attributes && this.setAttributes({
- html: T,
- element: o,
- intent: r,
- linkService: c
- });
- const S = [[o, -1, T]], w = h.div;
- if (w.append(T), h.viewport) {
- const b = `matrix(${h.viewport.transform.join(",")})`;
- w.style.transform = b;
- }
- r !== "richText" && w.setAttribute("class", "xfaLayer xfaFont");
- const C = [];
- for (; S.length > 0; ) {
- const [b, k, F] = S.at(-1);
- if (k + 1 === b.children.length) {
- S.pop();
- continue;
- }
- const x = b.children[++S.at(-1)[1]];
- if (x === null)
- continue;
- const {
- name: y
- } = x;
- if (y === "#text") {
- const E = document.createTextNode(x.value);
- C.push(E), F.append(E);
- continue;
- }
- const p = (P = x == null ? void 0 : x.attributes) != null && P.xmlns ? document.createElementNS(x.attributes.xmlns, y) : document.createElement(y);
- if (F.append(p), x.attributes && this.setAttributes({
- html: p,
- element: x,
- storage: _,
- intent: r,
- linkService: c
- }), x.children && x.children.length > 0)
- S.push([x, -1, p]);
- else if (x.value) {
- const E = document.createTextNode(x.value);
- n.XfaText.shouldBuildText(y) && C.push(E), p.append(E);
- }
- }
- for (const b of w.querySelectorAll(".xfaNonInteractive input, .xfaNonInteractive textarea"))
- b.setAttribute("readOnly", !0);
- return {
- textDivs: C
- };
- }
- static update(h) {
- const _ = `matrix(${h.viewport.transform.join(",")})`;
- h.div.style.transform = _, h.div.hidden = !1;
- }
- }
- e.XfaLayer = s;
- },
- /* 33 */
- /***/
- (t, e, i) => {
- var o, r, T, S, w, C, P, b, k, F, x, y, p, E, $, Hn, m, Un, D, jn, G, Gn, B, dn, Y, Wn, le, pn, we, qn, R, zn, g, Xn, v, Vn, O, Yn, z, ot, Q, gn, ue, Ft, fe, It, Fe, pt, De, _n, ie, Lt, ge, Kn, xe, mn, We, Jn, ze, Qn, Ge, bn, de, Dt, J, gt;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.InkEditor = void 0;
- var n = i(1), s = i(4), l = i(29), h = i(6), _ = i(5);
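- // InkEditor: free-hand drawing editor; captures pointer strokes on a canvas,
- // stores them as bezier paths, and serializes them as INK annotations.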
- const Se = class Se extends s.AnnotationEditor {
- constructor(te) {
- super({
- ...te,
- name: "inkEditor"
- });
- W(this, $);
- W(this, m);
- W(this, D);
- W(this, G);
- W(this, B);
- W(this, Y);
- W(this, le);
- W(this, we);
- W(this, R);
- W(this, g);
- W(this, v);
- W(this, O);
- W(this, z);
- W(this, Q);
- W(this, ue);
- W(this, fe);
- W(this, Fe);
- W(this, De);
- W(this, ie);
- W(this, ze);
- W(this, Ge);
- W(this, de);
- W(this, J);
- W(this, o, 0);
- W(this, r, 0);
- W(this, T, this.canvasPointermove.bind(this));
- W(this, S, this.canvasPointerleave.bind(this));
- W(this, w, this.canvasPointerup.bind(this));
- W(this, C, this.canvasPointerdown.bind(this));
- W(this, P, new Path2D());
- W(this, b, !1);
- W(this, k, !1);
- W(this, F, !1);
- W(this, x, null);
- W(this, y, 0);
- W(this, p, 0);
- W(this, E, null);
- this.color = te.color || null, this.thickness = te.thickness || null, this.opacity = te.opacity || null, this.paths = [], this.bezierPath2D = [], this.allRawPaths = [], this.currentPath = [], this.scaleFactor = 1, this.translationX = this.translationY = 0, this.x = 0, this.y = 0, this._willKeepAspectRatio = !0;
- }
- static initialize(te) {
- s.AnnotationEditor.initialize(te, {
- strings: ["editor_ink_canvas_aria_label", "editor_ink2_aria_label"]
- });
- }
- static updateDefaultParams(te, Te) {
- switch (te) {
- case n.AnnotationEditorParamsType.INK_THICKNESS:
- Se._defaultThickness = Te;
- break;
- case n.AnnotationEditorParamsType.INK_COLOR:
- Se._defaultColor = Te;
- break;
- case n.AnnotationEditorParamsType.INK_OPACITY:
- Se._defaultOpacity = Te / 100;
- break;
- }
- }
- updateParams(te, Te) {
- switch (te) {
- case n.AnnotationEditorParamsType.INK_THICKNESS:
- K(this, $, Hn).call(this, Te);
- break;
- case n.AnnotationEditorParamsType.INK_COLOR:
- K(this, m, Un).call(this, Te);
- break;
- case n.AnnotationEditorParamsType.INK_OPACITY:
- K(this, D, jn).call(this, Te);
- break;
- }
- }
- static get defaultPropertiesToUpdate() {
- return [[n.AnnotationEditorParamsType.INK_THICKNESS, Se._defaultThickness], [n.AnnotationEditorParamsType.INK_COLOR, Se._defaultColor || s.AnnotationEditor._defaultLineColor], [n.AnnotationEditorParamsType.INK_OPACITY, Math.round(Se._defaultOpacity * 100)]];
- }
- get propertiesToUpdate() {
- return [[n.AnnotationEditorParamsType.INK_THICKNESS, this.thickness || Se._defaultThickness], [n.AnnotationEditorParamsType.INK_COLOR, this.color || Se._defaultColor || s.AnnotationEditor._defaultLineColor], [n.AnnotationEditorParamsType.INK_OPACITY, Math.round(100 * (this.opacity ?? Se._defaultOpacity))]];
- }
- rebuild() {
- this.parent && (super.rebuild(), this.div !== null && (this.canvas || (K(this, ue, Ft).call(this), K(this, fe, It).call(this)), this.isAttachedToDOM || (this.parent.add(this), K(this, Fe, pt).call(this)), K(this, J, gt).call(this)));
- }
- remove() {
- this.canvas !== null && (this.isEmpty() || this.commit(), this.canvas.width = this.canvas.height = 0, this.canvas.remove(), this.canvas = null, a(this, x).disconnect(), oe(this, x, null), super.remove());
- }
- setParent(te) {
- !this.parent && te ? this._uiManager.removeShouldRescale(this) : this.parent && te === null && this._uiManager.addShouldRescale(this), super.setParent(te);
- }
- onScaleChanging() {
- const [te, Te] = this.parentDimensions, Ne = this.width * te, ke = this.height * Te;
- this.setDimensions(Ne, ke);
- }
- enableEditMode() {
- a(this, b) || this.canvas === null || (super.enableEditMode(), this._isDraggable = !1, this.canvas.addEventListener("pointerdown", a(this, C)));
- }
- disableEditMode() {
- !this.isInEditMode() || this.canvas === null || (super.disableEditMode(), this._isDraggable = !this.isEmpty(), this.div.classList.remove("editing"), this.canvas.removeEventListener("pointerdown", a(this, C)));
- }
- onceAdded() {
- this._isDraggable = !this.isEmpty();
- }
- isEmpty() {
- return this.paths.length === 0 || this.paths.length === 1 && this.paths[0].length === 0;
- }
- commit() {
- a(this, b) || (super.commit(), this.isEditing = !1, this.disableEditMode(), this.setInForeground(), oe(this, b, !0), this.div.classList.add("disabled"), K(this, J, gt).call(this, !0), this.makeResizable(), this.parent.addInkEditorIfNeeded(!0), this.moveInDOM(), this.div.focus({
- preventScroll: !0
- }));
- }
- focusin(te) {
- this._focusEventsAllowed && (super.focusin(te), this.enableEditMode());
- }
- canvasPointerdown(te) {
- te.button !== 0 || !this.isInEditMode() || a(this, b) || (this.setInForeground(), te.preventDefault(), te.type !== "mouse" && this.div.focus(), K(this, Y, Wn).call(this, te.offsetX, te.offsetY));
- }
- canvasPointermove(te) {
- te.preventDefault(), K(this, le, pn).call(this, te.offsetX, te.offsetY);
- }
- canvasPointerup(te) {
- te.preventDefault(), K(this, Q, gn).call(this, te);
- }
- canvasPointerleave(te) {
- K(this, Q, gn).call(this, te);
- }
- get isResizable() {
- return !this.isEmpty() && a(this, b);
- }
- render() {
- if (this.div)
- return this.div;
- let te, Te;
- this.width && (te = this.x, Te = this.y), super.render(), s.AnnotationEditor._l10nPromise.get("editor_ink2_aria_label").then((Qe) => {
- var Ae;
- return (Ae = this.div) == null ? void 0 : Ae.setAttribute("aria-label", Qe);
- });
- const [Ne, ke, $e, Be] = K(this, G, Gn).call(this);
- if (this.setAt(Ne, ke, 0, 0), this.setDims($e, Be), K(this, ue, Ft).call(this), this.width) {
- const [Qe, Ae] = this.parentDimensions;
- this.setAspectRatio(this.width * Qe, this.height * Ae), this.setAt(te * Qe, Te * Ae, this.width * Qe, this.height * Ae), oe(this, F, !0), K(this, Fe, pt).call(this), this.setDims(this.width * Qe, this.height * Ae), K(this, z, ot).call(this), this.div.classList.add("disabled");
- } else
- this.div.classList.add("editing"), this.enableEditMode();
- return K(this, fe, It).call(this), this.div;
- }
- setDimensions(te, Te) {
- const Ne = Math.round(te), ke = Math.round(Te);
- if (a(this, y) === Ne && a(this, p) === ke)
- return;
- oe(this, y, Ne), oe(this, p, ke), this.canvas.style.visibility = "hidden";
- const [$e, Be] = this.parentDimensions;
- this.width = te / $e, this.height = Te / Be, this.fixAndSetPosition(), a(this, b) && K(this, De, _n).call(this, te, Te), K(this, Fe, pt).call(this), K(this, z, ot).call(this), this.canvas.style.visibility = "visible", this.fixDims();
- }
- static deserialize(te, Te, Ne) {
- var V, Z, he;
- if (te instanceof l.InkAnnotationElement)
- return null;
- const ke = super.deserialize(te, Te, Ne);
- ke.thickness = te.thickness, ke.color = n.Util.makeHexColor(...te.color), ke.opacity = te.opacity;
- const [$e, Be] = ke.pageDimensions, Qe = ke.width * $e, Ae = ke.height * Be, Ke = ke.parentScale, Oe = te.thickness / 2;
- oe(ke, b, !0), oe(ke, y, Math.round(Qe)), oe(ke, p, Math.round(Ae));
- const {
- paths: U,
- rect: u,
- rotation: L
- } = te;
- for (let {
- bezier: ye
- } of U) {
- ye = K(V = Se, We, Jn).call(V, ye, u, L);
- const Me = [];
- ke.paths.push(Me);
- let Re = Ke * (ye[0] - Oe), qe = Ke * (ye[1] - Oe);
- for (let Le = 2, He = ye.length; Le < He; Le += 6) {
- const Ve = Ke * (ye[Le] - Oe), Je = Ke * (ye[Le + 1] - Oe), Ze = Ke * (ye[Le + 2] - Oe), st = Ke * (ye[Le + 3] - Oe), it = Ke * (ye[Le + 4] - Oe), rt = Ke * (ye[Le + 5] - Oe);
- Me.push([[Re, qe], [Ve, Je], [Ze, st], [it, rt]]), Re = it, qe = rt;
- }
- const Ie = K(this, ge, Kn).call(this, Me);
- ke.bezierPath2D.push(Ie);
- }
- const j = K(Z = ke, Ge, bn).call(Z);
- return oe(ke, r, Math.max(s.AnnotationEditor.MIN_SIZE, j[2] - j[0])), oe(ke, o, Math.max(s.AnnotationEditor.MIN_SIZE, j[3] - j[1])), K(he = ke, De, _n).call(he, Qe, Ae), ke;
- }
- serialize() {
- if (this.isEmpty())
- return null;
- const te = this.getRect(0, 0), Te = s.AnnotationEditor._colorManager.convert(this.ctx.strokeStyle);
- return {
- annotationType: n.AnnotationEditorType.INK,
- color: Te,
- thickness: this.thickness,
- opacity: this.opacity,
- paths: K(this, ze, Qn).call(this, this.scaleFactor / this.parentScale, this.translationX, this.translationY, te),
- pageIndex: this.pageIndex,
- rect: te,
- rotation: this.rotation,
- structTreeParentId: this._structTreeParentId
- };
- }
- };
- o = new WeakMap(), r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakMap(), F = new WeakMap(), x = new WeakMap(), y = new WeakMap(), p = new WeakMap(), E = new WeakMap(), $ = new WeakSet(), Hn = function(te) {
- const Te = this.thickness;
- this.addCommands({
- cmd: () => {
- this.thickness = te, K(this, J, gt).call(this);
- },
- undo: () => {
- this.thickness = Te, K(this, J, gt).call(this);
- },
- mustExec: !0,
- type: n.AnnotationEditorParamsType.INK_THICKNESS,
- overwriteIfSameType: !0,
- keepUndo: !0
- });
- }, m = new WeakSet(), Un = function(te) {
- const Te = this.color;
- this.addCommands({
- cmd: () => {
- this.color = te, K(this, z, ot).call(this);
- },
- undo: () => {
- this.color = Te, K(this, z, ot).call(this);
- },
- mustExec: !0,
- type: n.AnnotationEditorParamsType.INK_COLOR,
- overwriteIfSameType: !0,
- keepUndo: !0
- });
- }, D = new WeakSet(), jn = function(te) {
- te /= 100;
- const Te = this.opacity;
- this.addCommands({
- cmd: () => {
- this.opacity = te, K(this, z, ot).call(this);
- },
- undo: () => {
- this.opacity = Te, K(this, z, ot).call(this);
- },
- mustExec: !0,
- type: n.AnnotationEditorParamsType.INK_OPACITY,
- overwriteIfSameType: !0,
- keepUndo: !0
- });
- }, G = new WeakSet(), Gn = function() {
- const {
- parentRotation: te,
- parentDimensions: [Te, Ne]
- } = this;
- switch (te) {
- case 90:
- return [0, Ne, Ne, Te];
- case 180:
- return [Te, Ne, Te, Ne];
- case 270:
- return [Te, 0, Ne, Te];
- default:
- return [0, 0, Te, Ne];
- }
- }, B = new WeakSet(), dn = function() {
- const {
- ctx: te,
- color: Te,
- opacity: Ne,
- thickness: ke,
- parentScale: $e,
- scaleFactor: Be
- } = this;
- te.lineWidth = ke * $e / Be, te.lineCap = "round", te.lineJoin = "round", te.miterLimit = 10, te.strokeStyle = `${Te}${(0, _.opacityToHex)(Ne)}`;
- }, Y = new WeakSet(), Wn = function(te, Te) {
- this.canvas.addEventListener("contextmenu", h.noContextMenu), this.canvas.addEventListener("pointerleave", a(this, S)), this.canvas.addEventListener("pointermove", a(this, T)), this.canvas.addEventListener("pointerup", a(this, w)), this.canvas.removeEventListener("pointerdown", a(this, C)), this.isEditing = !0, a(this, F) || (oe(this, F, !0), K(this, Fe, pt).call(this), this.thickness || (this.thickness = Se._defaultThickness), this.color || (this.color = Se._defaultColor || s.AnnotationEditor._defaultLineColor), this.opacity ?? (this.opacity = Se._defaultOpacity)), this.currentPath.push([te, Te]), oe(this, k, !1), K(this, B, dn).call(this), oe(this, E, () => {
- K(this, g, Xn).call(this), a(this, E) && window.requestAnimationFrame(a(this, E));
- }), window.requestAnimationFrame(a(this, E));
- }, le = new WeakSet(), pn = function(te, Te) {
- const [Ne, ke] = this.currentPath.at(-1);
- if (this.currentPath.length > 1 && te === Ne && Te === ke)
- return;
- const $e = this.currentPath;
- let Be = a(this, P);
- if ($e.push([te, Te]), oe(this, k, !0), $e.length <= 2) {
- Be.moveTo(...$e[0]), Be.lineTo(te, Te);
- return;
- }
- $e.length === 3 && (oe(this, P, Be = new Path2D()), Be.moveTo(...$e[0])), K(this, v, Vn).call(this, Be, ...$e.at(-3), ...$e.at(-2), te, Te);
- }, we = new WeakSet(), qn = function() {
- if (this.currentPath.length === 0)
- return;
- const te = this.currentPath.at(-1);
- a(this, P).lineTo(...te);
- }, R = new WeakSet(), zn = function(te, Te) {
- oe(this, E, null), te = Math.min(Math.max(te, 0), this.canvas.width), Te = Math.min(Math.max(Te, 0), this.canvas.height), K(this, le, pn).call(this, te, Te), K(this, we, qn).call(this);
- let Ne;
- if (this.currentPath.length !== 1)
- Ne = K(this, O, Yn).call(this);
- else {
- const Ae = [te, Te];
- Ne = [[Ae, Ae.slice(), Ae.slice(), Ae]];
- }
- const ke = a(this, P), $e = this.currentPath;
- this.currentPath = [], oe(this, P, new Path2D());
- const Be = () => {
- this.allRawPaths.push($e), this.paths.push(Ne), this.bezierPath2D.push(ke), this.rebuild();
- }, Qe = () => {
- this.allRawPaths.pop(), this.paths.pop(), this.bezierPath2D.pop(), this.paths.length === 0 ? this.remove() : (this.canvas || (K(this, ue, Ft).call(this), K(this, fe, It).call(this)), K(this, J, gt).call(this));
- };
- this.addCommands({
- cmd: Be,
- undo: Qe,
- mustExec: !0
- });
- }, g = new WeakSet(), Xn = function() {
- if (!a(this, k))
- return;
- oe(this, k, !1);
- const te = Math.ceil(this.thickness * this.parentScale), Te = this.currentPath.slice(-3), Ne = Te.map((Be) => Be[0]), ke = Te.map((Be) => Be[1]);
- Math.min(...Ne) - te, Math.max(...Ne) + te, Math.min(...ke) - te, Math.max(...ke) + te;
- const {
- ctx: $e
- } = this;
- $e.save(), $e.clearRect(0, 0, this.canvas.width, this.canvas.height);
- for (const Be of this.bezierPath2D)
- $e.stroke(Be);
- $e.stroke(a(this, P)), $e.restore();
- }, v = new WeakSet(), Vn = function(te, Te, Ne, ke, $e, Be, Qe) {
- const Ae = (Te + ke) / 2, Ke = (Ne + $e) / 2, Oe = (ke + Be) / 2, U = ($e + Qe) / 2;
- te.bezierCurveTo(Ae + 2 * (ke - Ae) / 3, Ke + 2 * ($e - Ke) / 3, Oe + 2 * (ke - Oe) / 3, U + 2 * ($e - U) / 3, Oe, U);
- }, O = new WeakSet(), Yn = function() {
- const te = this.currentPath;
- if (te.length <= 2)
- return [[te[0], te[0], te.at(-1), te.at(-1)]];
- const Te = [];
- let Ne, [ke, $e] = te[0];
- for (Ne = 1; Ne < te.length - 2; Ne++) {
- const [u, L] = te[Ne], [j, V] = te[Ne + 1], Z = (u + j) / 2, he = (L + V) / 2, ye = [ke + 2 * (u - ke) / 3, $e + 2 * (L - $e) / 3], Me = [Z + 2 * (u - Z) / 3, he + 2 * (L - he) / 3];
- Te.push([[ke, $e], ye, Me, [Z, he]]), [ke, $e] = [Z, he];
- }
- const [Be, Qe] = te[Ne], [Ae, Ke] = te[Ne + 1], Oe = [ke + 2 * (Be - ke) / 3, $e + 2 * (Qe - $e) / 3], U = [Ae + 2 * (Be - Ae) / 3, Ke + 2 * (Qe - Ke) / 3];
- return Te.push([[ke, $e], Oe, U, [Ae, Ke]]), Te;
- }, z = new WeakSet(), ot = function() {
- if (this.isEmpty()) {
- K(this, ie, Lt).call(this);
- return;
- }
- K(this, B, dn).call(this);
- const {
- canvas: te,
- ctx: Te
- } = this;
- Te.setTransform(1, 0, 0, 1, 0, 0), Te.clearRect(0, 0, te.width, te.height), K(this, ie, Lt).call(this);
- for (const Ne of this.bezierPath2D)
- Te.stroke(Ne);
- }, Q = new WeakSet(), gn = function(te) {
- this.canvas.removeEventListener("pointerleave", a(this, S)), this.canvas.removeEventListener("pointermove", a(this, T)), this.canvas.removeEventListener("pointerup", a(this, w)), this.canvas.addEventListener("pointerdown", a(this, C)), setTimeout(() => {
- this.canvas.removeEventListener("contextmenu", h.noContextMenu);
- }, 10), K(this, R, zn).call(this, te.offsetX, te.offsetY), this.addToAnnotationStorage(), this.setInBackground();
- }, ue = new WeakSet(), Ft = function() {
- this.canvas = document.createElement("canvas"), this.canvas.width = this.canvas.height = 0, this.canvas.className = "inkEditorCanvas", s.AnnotationEditor._l10nPromise.get("editor_ink_canvas_aria_label").then((te) => {
- var Te;
- return (Te = this.canvas) == null ? void 0 : Te.setAttribute("aria-label", te);
- }), this.div.append(this.canvas), this.ctx = this.canvas.getContext("2d");
- }, fe = new WeakSet(), It = function() {
- oe(this, x, new ResizeObserver((te) => {
- const Te = te[0].contentRect;
- Te.width && Te.height && this.setDimensions(Te.width, Te.height);
- })), a(this, x).observe(this.div);
- }, Fe = new WeakSet(), pt = function() {
- if (!a(this, F))
- return;
- const [te, Te] = this.parentDimensions;
- this.canvas.width = Math.ceil(this.width * te), this.canvas.height = Math.ceil(this.height * Te), K(this, ie, Lt).call(this);
- }, De = new WeakSet(), _n = function(te, Te) {
- const Ne = K(this, de, Dt).call(this), ke = (te - Ne) / a(this, r), $e = (Te - Ne) / a(this, o);
- this.scaleFactor = Math.min(ke, $e);
- }, ie = new WeakSet(), Lt = function() {
- const te = K(this, de, Dt).call(this) / 2;
- this.ctx.setTransform(this.scaleFactor, 0, 0, this.scaleFactor, this.translationX * this.scaleFactor + te, this.translationY * this.scaleFactor + te);
- }, ge = new WeakSet(), Kn = function(te) {
- const Te = new Path2D();
- for (let Ne = 0, ke = te.length; Ne < ke; Ne++) {
- const [$e, Be, Qe, Ae] = te[Ne];
- Ne === 0 && Te.moveTo(...$e), Te.bezierCurveTo(Be[0], Be[1], Qe[0], Qe[1], Ae[0], Ae[1]);
- }
- return Te;
- }, xe = new WeakSet(), mn = function(te, Te, Ne) {
- const [ke, $e, Be, Qe] = Te;
- switch (Ne) {
- case 0:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2)
- te[Ae] += ke, te[Ae + 1] = Qe - te[Ae + 1];
- break;
- case 90:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2) {
- const Oe = te[Ae];
- te[Ae] = te[Ae + 1] + ke, te[Ae + 1] = Oe + $e;
- }
- break;
- case 180:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2)
- te[Ae] = Be - te[Ae], te[Ae + 1] += $e;
- break;
- case 270:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2) {
- const Oe = te[Ae];
- te[Ae] = Be - te[Ae + 1], te[Ae + 1] = Qe - Oe;
- }
- break;
- default:
- throw new Error("Invalid rotation");
- }
- return te;
- }, We = new WeakSet(), Jn = function(te, Te, Ne) {
- const [ke, $e, Be, Qe] = Te;
- switch (Ne) {
- case 0:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2)
- te[Ae] -= ke, te[Ae + 1] = Qe - te[Ae + 1];
- break;
- case 90:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2) {
- const Oe = te[Ae];
- te[Ae] = te[Ae + 1] - $e, te[Ae + 1] = Oe - ke;
- }
- break;
- case 180:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2)
- te[Ae] = Be - te[Ae], te[Ae + 1] -= $e;
- break;
- case 270:
- for (let Ae = 0, Ke = te.length; Ae < Ke; Ae += 2) {
- const Oe = te[Ae];
- te[Ae] = Qe - te[Ae + 1], te[Ae + 1] = Be - Oe;
- }
- break;
- default:
- throw new Error("Invalid rotation");
- }
- return te;
- }, ze = new WeakSet(), Qn = function(te, Te, Ne, ke) {
- var Ke, Oe;
- const $e = [], Be = this.thickness / 2, Qe = te * Te + Be, Ae = te * Ne + Be;
- for (const U of this.paths) {
- const u = [], L = [];
- for (let j = 0, V = U.length; j < V; j++) {
- const [Z, he, ye, Me] = U[j], Re = te * Z[0] + Qe, qe = te * Z[1] + Ae, Ie = te * he[0] + Qe, Le = te * he[1] + Ae, He = te * ye[0] + Qe, Ve = te * ye[1] + Ae, Je = te * Me[0] + Qe, Ze = te * Me[1] + Ae;
- j === 0 && (u.push(Re, qe), L.push(Re, qe)), u.push(Ie, Le, He, Ve, Je, Ze), L.push(Ie, Le), j === V - 1 && L.push(Je, Ze);
- }
- $e.push({
- bezier: K(Ke = Se, xe, mn).call(Ke, u, ke, this.rotation),
- points: K(Oe = Se, xe, mn).call(Oe, L, ke, this.rotation)
- });
- }
- return $e;
- }, Ge = new WeakSet(), bn = function() {
- let te = 1 / 0, Te = -1 / 0, Ne = 1 / 0, ke = -1 / 0;
- for (const $e of this.paths)
- for (const [Be, Qe, Ae, Ke] of $e) {
- const Oe = n.Util.bezierBoundingBox(...Be, ...Qe, ...Ae, ...Ke);
- te = Math.min(te, Oe[0]), Ne = Math.min(Ne, Oe[1]), Te = Math.max(Te, Oe[2]), ke = Math.max(ke, Oe[3]);
- }
- return [te, Ne, Te, ke];
- }, de = new WeakSet(), Dt = function() {
- return a(this, b) ? Math.ceil(this.thickness * this.parentScale) : 0;
- }, J = new WeakSet(), gt = function(te = !1) {
- if (this.isEmpty())
- return;
- if (!a(this, b)) {
- K(this, z, ot).call(this);
- return;
- }
- const Te = K(this, Ge, bn).call(this), Ne = K(this, de, Dt).call(this);
- oe(this, r, Math.max(s.AnnotationEditor.MIN_SIZE, Te[2] - Te[0])), oe(this, o, Math.max(s.AnnotationEditor.MIN_SIZE, Te[3] - Te[1]));
- const ke = Math.ceil(Ne + a(this, r) * this.scaleFactor), $e = Math.ceil(Ne + a(this, o) * this.scaleFactor), [Be, Qe] = this.parentDimensions;
- this.width = ke / Be, this.height = $e / Qe, this.setAspectRatio(ke, $e);
- const Ae = this.translationX, Ke = this.translationY;
- this.translationX = -Te[0], this.translationY = -Te[1], K(this, Fe, pt).call(this), K(this, z, ot).call(this), oe(this, y, ke), oe(this, p, $e), this.setDims(ke, $e);
- const Oe = te ? Ne / this.scaleFactor / 2 : 0;
- this.translate(Ae - this.translationX - Oe, Ke - this.translationY - Oe);
- }, W(Se, ge), W(Se, xe), W(Se, We), nt(Se, "_defaultColor", null), nt(Se, "_defaultOpacity", 1), nt(Se, "_defaultThickness", 1), nt(Se, "_type", "ink");
- let c = Se;
- e.InkEditor = c;
- },
- /* 34 */
- /***/
- (t, e, i) => {
- var c, o, r, T, S, w, C, P, b, k, F, Et, y, St, E, Ot, M, yn, N, Zn, X, ei, I, vn, ee, Nt, q, ti;
- Object.defineProperty(e, "__esModule", {
- value: !0
- }), e.StampEditor = void 0;
- var n = i(1), s = i(4), l = i(6), h = i(29);
- const pe = class pe extends s.AnnotationEditor {
- constructor(R) {
- super({
- ...R,
- name: "stampEditor"
- });
- W(this, F);
- W(this, y);
- W(this, E);
- W(this, M);
- W(this, N);
- W(this, X);
- W(this, I);
- W(this, ee);
- W(this, q);
- W(this, c, null);
- W(this, o, null);
- W(this, r, null);
- W(this, T, null);
- W(this, S, null);
- W(this, w, null);
- W(this, C, null);
- W(this, P, null);
- W(this, b, !1);
- W(this, k, !1);
- oe(this, T, R.bitmapUrl), oe(this, S, R.bitmapFile);
- }
- static initialize(R) {
- s.AnnotationEditor.initialize(R);
- }
- static get supportedTypes() {
- const R = ["apng", "avif", "bmp", "gif", "jpeg", "png", "svg+xml", "webp", "x-icon"];
- return (0, n.shadow)(this, "supportedTypes", R.map((d) => `image/${d}`));
- }
- static get supportedTypesStr() {
- return (0, n.shadow)(this, "supportedTypesStr", this.supportedTypes.join(","));
- }
- static isHandlingMimeForPasting(R) {
- return this.supportedTypes.includes(R);
- }
- static paste(R, d) {
- d.pasteEditor(n.AnnotationEditorType.STAMP, {
- bitmapFile: R.getAsFile()
- });
- }
- remove() {
- var R, d;
- a(this, o) && (oe(this, c, null), this._uiManager.imageManager.deleteId(a(this, o)), (R = a(this, w)) == null || R.remove(), oe(this, w, null), (d = a(this, C)) == null || d.disconnect(), oe(this, C, null)), super.remove();
- }
- rebuild() {
- if (!this.parent) {
- a(this, o) && K(this, E, Ot).call(this);
- return;
- }
- super.rebuild(), this.div !== null && (a(this, o) && K(this, E, Ot).call(this), this.isAttachedToDOM || this.parent.add(this));
- }
- onceAdded() {
- this._isDraggable = !0, this.div.focus();
- }
- isEmpty() {
- return !(a(this, r) || a(this, c) || a(this, T) || a(this, S));
- }
- get isResizable() {
- return !0;
- }
- render() {
- if (this.div)
- return this.div;
- let R, d;
- if (this.width && (R = this.x, d = this.y), super.render(), this.div.hidden = !0, a(this, c) ? K(this, M, yn).call(this) : K(this, E, Ot).call(this), this.width) {
- const [g, f] = this.parentDimensions;
- this.setAt(R * g, d * f, this.width * g, this.height * f);
- }
- return this.div;
- }
- static deserialize(R, d, g) {
- if (R instanceof h.StampAnnotationElement)
- return null;
- const f = super.deserialize(R, d, g), {
- rect: v,
- bitmapUrl: A,
- bitmapId: O,
- isSvg: H,
- accessibilityData: z
- } = R;
- O && g.imageManager.isValidId(O) ? oe(f, o, O) : oe(f, T, A), oe(f, b, H);
- const [ae, Q] = f.pageDimensions;
- return f.width = (v[2] - v[0]) / ae, f.height = (v[3] - v[1]) / Q, z && (f.altTextData = z), f;
- }
- serialize(R = !1, d = null) {
- if (this.isEmpty())
- return null;
- const g = {
- annotationType: n.AnnotationEditorType.STAMP,
- bitmapId: a(this, o),
- pageIndex: this.pageIndex,
- rect: this.getRect(0, 0),
- rotation: this.rotation,
- isSvg: a(this, b),
- structTreeParentId: this._structTreeParentId
- };
- if (R)
- return g.bitmapUrl = K(this, ee, Nt).call(this, !0), g.accessibilityData = this.altTextData, g;
- const {
- decorative: f,
- altText: v
- } = this.altTextData;
- if (!f && v && (g.accessibilityData = {
- type: "Figure",
- alt: v
- }), d === null)
- return g;
- d.stamps || (d.stamps = /* @__PURE__ */ new Map());
- const A = a(this, b) ? (g.rect[2] - g.rect[0]) * (g.rect[3] - g.rect[1]) : null;
- if (!d.stamps.has(a(this, o)))
- d.stamps.set(a(this, o), {
- area: A,
- serialized: g
- }), g.bitmap = K(this, ee, Nt).call(this, !1);
- else if (a(this, b)) {
- const O = d.stamps.get(a(this, o));
- A > O.area && (O.area = A, O.serialized.bitmap.close(), O.serialized.bitmap = K(this, ee, Nt).call(this, !1));
- }
- return g;
- }
- };
- c = new WeakMap(), o = new WeakMap(), r = new WeakMap(), T = new WeakMap(), S = new WeakMap(), w = new WeakMap(), C = new WeakMap(), P = new WeakMap(), b = new WeakMap(), k = new WeakMap(), F = new WeakSet(), Et = function(R, d = !1) {
- if (!R) {
- this.remove();
- return;
- }
- oe(this, c, R.bitmap), d || (oe(this, o, R.id), oe(this, b, R.isSvg)), K(this, M, yn).call(this);
- }, y = new WeakSet(), St = function() {
- oe(this, r, null), this._uiManager.enableWaiting(!1), a(this, w) && this.div.focus();
- }, E = new WeakSet(), Ot = function() {
- if (a(this, o)) {
- this._uiManager.enableWaiting(!0), this._uiManager.imageManager.getFromId(a(this, o)).then((d) => K(this, F, Et).call(this, d, !0)).finally(() => K(this, y, St).call(this));
- return;
- }
- if (a(this, T)) {
- const d = a(this, T);
- oe(this, T, null), this._uiManager.enableWaiting(!0), oe(this, r, this._uiManager.imageManager.getFromUrl(d).then((g) => K(this, F, Et).call(this, g)).finally(() => K(this, y, St).call(this)));
- return;
- }
- if (a(this, S)) {
- const d = a(this, S);
- oe(this, S, null), this._uiManager.enableWaiting(!0), oe(this, r, this._uiManager.imageManager.getFromFile(d).then((g) => K(this, F, Et).call(this, g)).finally(() => K(this, y, St).call(this)));
- return;
- }
- const R = document.createElement("input");
- R.type = "file", R.accept = pe.supportedTypesStr, oe(this, r, new Promise((d) => {
- R.addEventListener("change", async () => {
- if (!R.files || R.files.length === 0)
- this.remove();
- else {
- this._uiManager.enableWaiting(!0);
- const g = await this._uiManager.imageManager.getFromFile(R.files[0]);
- K(this, F, Et).call(this, g);
- }
- d();
- }), R.addEventListener("cancel", () => {
- this.remove(), d();
- });
- }).finally(() => K(this, y, St).call(this))), R.click();
- }, M = new WeakSet(), yn = function() {
- const {
- div: R
- } = this;
- let {
- width: d,
- height: g
- } = a(this, c);
- const [f, v] = this.pageDimensions, A = 0.75;
- if (this.width)
- d = this.width * f, g = this.height * v;
- else if (d > A * f || g > A * v) {
- const ae = Math.min(A * f / d, A * v / g);
- d *= ae, g *= ae;
- }
- const [O, H] = this.parentDimensions;
- this.setDims(d * O / f, g * H / v), this._uiManager.enableWaiting(!1);
- const z = oe(this, w, document.createElement("canvas"));
- R.append(z), R.hidden = !1, K(this, I, vn).call(this, d, g), K(this, q, ti).call(this), a(this, k) || (this.parent.addUndoableEditor(this), oe(this, k, !0)), this._uiManager._eventBus.dispatch("reporttelemetry", {
- source: this,
- details: {
- type: "editing",
- subtype: this.editorType,
- data: {
- action: "inserted_image"
- }
- }
- }), this.addAltTextButton();
- }, N = new WeakSet(), Zn = function(R, d) {
- var A;
- const [g, f] = this.parentDimensions;
- this.width = R / g, this.height = d / f, this.setDims(R, d), (A = this._initialOptions) != null && A.isCentered ? this.center() : this.fixAndSetPosition(), this._initialOptions = null, a(this, P) !== null && clearTimeout(a(this, P)), oe(this, P, setTimeout(() => {
- oe(this, P, null), K(this, I, vn).call(this, R, d);
- }, 200));
- }, X = new WeakSet(), ei = function(R, d) {
- const {
- width: g,
- height: f
- } = a(this, c);
- let v = g, A = f, O = a(this, c);
- for (; v > 2 * R || A > 2 * d; ) {
- const H = v, z = A;
- v > 2 * R && (v = v >= 16384 ? Math.floor(v / 2) - 1 : Math.ceil(v / 2)), A > 2 * d && (A = A >= 16384 ? Math.floor(A / 2) - 1 : Math.ceil(A / 2));
- const ae = new OffscreenCanvas(v, A);
- ae.getContext("2d").drawImage(O, 0, 0, H, z, 0, 0, v, A), O = ae.transferToImageBitmap();
- }
- return O;
- }, I = new WeakSet(), vn = function(R, d) {
- R = Math.ceil(R), d = Math.ceil(d);
- const g = a(this, w);
- if (!g || g.width === R && g.height === d)
- return;
- g.width = R, g.height = d;
- const f = a(this, b) ? a(this, c) : K(this, X, ei).call(this, R, d), v = g.getContext("2d");
- v.filter = this._uiManager.hcmFilter, v.drawImage(f, 0, 0, f.width, f.height, 0, 0, R, d);
- }, ee = new WeakSet(), Nt = function(R) {
- if (R) {
- if (a(this, b)) {
- const f = this._uiManager.imageManager.getSvgUrl(a(this, o));
- if (f)
- return f;
- }
- const d = document.createElement("canvas");
- return {
- width: d.width,
- height: d.height
- } = a(this, c), d.getContext("2d").drawImage(a(this, c), 0, 0), d.toDataURL();
- }
- if (a(this, b)) {
- const [d, g] = this.pageDimensions, f = Math.round(this.width * d * l.PixelsPerInch.PDF_TO_CSS_UNITS), v = Math.round(this.height * g * l.PixelsPerInch.PDF_TO_CSS_UNITS), A = new OffscreenCanvas(f, v);
- return A.getContext("2d").drawImage(a(this, c), 0, 0, a(this, c).width, a(this, c).height, 0, 0, f, v), A.transferToImageBitmap();
- }
- return structuredClone(a(this, c));
- }, q = new WeakSet(), ti = function() {
- oe(this, C, new ResizeObserver((R) => {
- const d = R[0].contentRect;
- d.width && d.height && K(this, N, Zn).call(this, d.width, d.height);
- })), a(this, C).observe(this.div);
- }, nt(pe, "_type", "stamp");
- let _ = pe;
- e.StampEditor = _;
- }
- /******/
- ], __webpack_module_cache__ = {};
- function __w_pdfjs_require__(t) {
- var e = __webpack_module_cache__[t];
- if (e !== void 0)
- return e.exports;
- var i = __webpack_module_cache__[t] = {
- /******/
- // no module.id needed
- /******/
- // no module.loaded needed
- /******/
- exports: {}
- /******/
- };
- return __webpack_modules__[t](i, i.exports, __w_pdfjs_require__), i.exports;
- }
- var __webpack_exports__ = {};
- return (() => {
- var t = __webpack_exports__;
- Object.defineProperty(t, "__esModule", {
- value: !0
- }), Object.defineProperty(t, "AbortException", {
- enumerable: !0,
- get: function() {
- return e.AbortException;
- }
- }), Object.defineProperty(t, "AnnotationEditorLayer", {
- enumerable: !0,
- get: function() {
- return l.AnnotationEditorLayer;
- }
- }), Object.defineProperty(t, "AnnotationEditorParamsType", {
- enumerable: !0,
- get: function() {
- return e.AnnotationEditorParamsType;
- }
- }), Object.defineProperty(t, "AnnotationEditorType", {
- enumerable: !0,
- get: function() {
- return e.AnnotationEditorType;
- }
- }), Object.defineProperty(t, "AnnotationEditorUIManager", {
- enumerable: !0,
- get: function() {
- return h.AnnotationEditorUIManager;
- }
- }), Object.defineProperty(t, "AnnotationLayer", {
- enumerable: !0,
- get: function() {
- return _.AnnotationLayer;
- }
- }), Object.defineProperty(t, "AnnotationMode", {
- enumerable: !0,
- get: function() {
- return e.AnnotationMode;
- }
- }), Object.defineProperty(t, "CMapCompressionType", {
- enumerable: !0,
- get: function() {
- return e.CMapCompressionType;
- }
- }), Object.defineProperty(t, "DOMSVGFactory", {
- enumerable: !0,
- get: function() {
- return n.DOMSVGFactory;
- }
- }), Object.defineProperty(t, "FeatureTest", {
- enumerable: !0,
- get: function() {
- return e.FeatureTest;
- }
- }), Object.defineProperty(t, "GlobalWorkerOptions", {
- enumerable: !0,
- get: function() {
- return c.GlobalWorkerOptions;
- }
- }), Object.defineProperty(t, "ImageKind", {
- enumerable: !0,
- get: function() {
- return e.ImageKind;
- }
- }), Object.defineProperty(t, "InvalidPDFException", {
- enumerable: !0,
- get: function() {
- return e.InvalidPDFException;
- }
- }), Object.defineProperty(t, "MissingPDFException", {
- enumerable: !0,
- get: function() {
- return e.MissingPDFException;
- }
- }), Object.defineProperty(t, "OPS", {
- enumerable: !0,
- get: function() {
- return e.OPS;
- }
- }), Object.defineProperty(t, "PDFDataRangeTransport", {
- enumerable: !0,
- get: function() {
- return i.PDFDataRangeTransport;
- }
- }), Object.defineProperty(t, "PDFDateString", {
- enumerable: !0,
- get: function() {
- return n.PDFDateString;
- }
- }), Object.defineProperty(t, "PDFWorker", {
- enumerable: !0,
- get: function() {
- return i.PDFWorker;
- }
- }), Object.defineProperty(t, "PasswordResponses", {
- enumerable: !0,
- get: function() {
- return e.PasswordResponses;
- }
- }), Object.defineProperty(t, "PermissionFlag", {
- enumerable: !0,
- get: function() {
- return e.PermissionFlag;
- }
- }), Object.defineProperty(t, "PixelsPerInch", {
- enumerable: !0,
- get: function() {
- return n.PixelsPerInch;
- }
- }), Object.defineProperty(t, "PromiseCapability", {
- enumerable: !0,
- get: function() {
- return e.PromiseCapability;
- }
- }), Object.defineProperty(t, "RenderingCancelledException", {
- enumerable: !0,
- get: function() {
- return n.RenderingCancelledException;
- }
- }), Object.defineProperty(t, "SVGGraphics", {
- enumerable: !0,
- get: function() {
- return i.SVGGraphics;
- }
- }), Object.defineProperty(t, "UnexpectedResponseException", {
- enumerable: !0,
- get: function() {
- return e.UnexpectedResponseException;
- }
- }), Object.defineProperty(t, "Util", {
- enumerable: !0,
- get: function() {
- return e.Util;
- }
- }), Object.defineProperty(t, "VerbosityLevel", {
- enumerable: !0,
- get: function() {
- return e.VerbosityLevel;
- }
- }), Object.defineProperty(t, "XfaLayer", {
- enumerable: !0,
- get: function() {
- return o.XfaLayer;
- }
- }), Object.defineProperty(t, "build", {
- enumerable: !0,
- get: function() {
- return i.build;
- }
- }), Object.defineProperty(t, "createValidAbsoluteUrl", {
- enumerable: !0,
- get: function() {
- return e.createValidAbsoluteUrl;
- }
- }), Object.defineProperty(t, "getDocument", {
- enumerable: !0,
- get: function() {
- return i.getDocument;
- }
- }), Object.defineProperty(t, "getFilenameFromUrl", {
- enumerable: !0,
- get: function() {
- return n.getFilenameFromUrl;
- }
- }), Object.defineProperty(t, "getPdfFilenameFromUrl", {
- enumerable: !0,
- get: function() {
- return n.getPdfFilenameFromUrl;
- }
- }), Object.defineProperty(t, "getXfaPageViewport", {
- enumerable: !0,
- get: function() {
- return n.getXfaPageViewport;
- }
- }), Object.defineProperty(t, "isDataScheme", {
- enumerable: !0,
- get: function() {
- return n.isDataScheme;
- }
- }), Object.defineProperty(t, "isPdfFile", {
- enumerable: !0,
- get: function() {
- return n.isPdfFile;
- }
- }), Object.defineProperty(t, "loadScript", {
- enumerable: !0,
- get: function() {
- return n.loadScript;
- }
- }), Object.defineProperty(t, "noContextMenu", {
- enumerable: !0,
- get: function() {
- return n.noContextMenu;
- }
- }), Object.defineProperty(t, "normalizeUnicode", {
- enumerable: !0,
- get: function() {
- return e.normalizeUnicode;
- }
- }), Object.defineProperty(t, "renderTextLayer", {
- enumerable: !0,
- get: function() {
- return s.renderTextLayer;
- }
- }), Object.defineProperty(t, "setLayerDimensions", {
- enumerable: !0,
- get: function() {
- return n.setLayerDimensions;
- }
- }), Object.defineProperty(t, "shadow", {
- enumerable: !0,
- get: function() {
- return e.shadow;
- }
- }), Object.defineProperty(t, "updateTextLayer", {
- enumerable: !0,
- get: function() {
- return s.updateTextLayer;
- }
- }), Object.defineProperty(t, "version", {
- enumerable: !0,
- get: function() {
- return i.version;
- }
- });
- var e = __w_pdfjs_require__(1), i = __w_pdfjs_require__(2), n = __w_pdfjs_require__(6), s = __w_pdfjs_require__(26), l = __w_pdfjs_require__(27), h = __w_pdfjs_require__(5), _ = __w_pdfjs_require__(29), c = __w_pdfjs_require__(14), o = __w_pdfjs_require__(32);
- })(), __webpack_exports__;
- })()
- ));
-})(pdf);
-var pdfExports = pdf.exports;
-const pdfjsLib = /* @__PURE__ */ getDefaultExportFromCjs(pdfExports), Index_svelte_svelte_type_style_lang = "", {
- SvelteComponent,
- append,
- assign,
- attr,
- binding_callbacks,
- check_outros,
- create_component,
- destroy_component,
- detach,
- element,
- empty,
- get_spread_object,
- get_spread_update,
- group_outros,
- init,
- insert,
- mount_component,
- safe_not_equal,
- set_data,
- set_style,
- space,
- text,
- transition_in,
- transition_out
-} = window.__gradio__svelte__internal, { tick } = window.__gradio__svelte__internal;
-function create_if_block_1(t) {
- let e, i;
- const n = [
- {
- autoscroll: (
- /*gradio*/
- t[11].autoscroll
- )
- },
- { i18n: (
- /*gradio*/
- t[11].i18n
- ) },
- /*loading_status*/
- t[10]
- ];
- let s = {};
- for (let l = 0; l < n.length; l += 1)
- s = assign(s, n[l]);
- return e = new Static({ props: s }), {
- c() {
- create_component(e.$$.fragment);
- },
- m(l, h) {
- mount_component(e, l, h), i = !0;
- },
- p(l, h) {
- const _ = h & /*gradio, loading_status*/
- 3072 ? get_spread_update(n, [
- h & /*gradio*/
- 2048 && {
- autoscroll: (
- /*gradio*/
- l[11].autoscroll
- )
- },
- h & /*gradio*/
- 2048 && { i18n: (
- /*gradio*/
- l[11].i18n
- ) },
- h & /*loading_status*/
- 1024 && get_spread_object(
- /*loading_status*/
- l[10]
- )
- ]) : {};
- e.$set(_);
- },
- i(l) {
- i || (transition_in(e.$$.fragment, l), i = !0);
- },
- o(l) {
- transition_out(e.$$.fragment, l), i = !1;
- },
- d(l) {
- destroy_component(e, l);
- }
- };
-}
-function create_else_block(t) {
- let e, i;
- return e = new Upload({
- props: {
- filetype: "application/pdf",
- file_count: "single",
- root: (
- /*root*/
- t[7]
- ),
- $$slots: { default: [create_default_slot_3] },
- $$scope: { ctx: t }
- }
- }), e.$on(
- "load",
- /*handle_upload*/
- t[17]
- ), {
- c() {
- create_component(e.$$.fragment);
- },
- m(n, s) {
- mount_component(e, n, s), i = !0;
- },
- p(n, s) {
- const l = {};
- s & /*root*/
- 128 && (l.root = /*root*/
- n[7]), s & /*$$scope*/
- 134217728 && (l.$$scope = { dirty: s, ctx: n }), e.$set(l);
- },
- i(n) {
- i || (transition_in(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component(e, n);
- }
- };
-}
-function create_if_block(t) {
- let e, i, n, s, l, h, _, c, o, r, T, S, w, C, P;
- return e = new ModifyUpload({
- props: {
- i18n: (
- /*gradio*/
- t[11].i18n
- ),
- absolute: !0
- }
- }), e.$on(
- "clear",
- /*handle_clear*/
- t[16]
- ), _ = new Button({
- props: {
- $$slots: { default: [create_default_slot_2] },
- $$scope: { ctx: t }
- }
- }), _.$on(
- "click",
- /*prev_page*/
- t[19]
- ), C = new Button({
- props: {
- $$slots: { default: [create_default_slot_1] },
- $$scope: { ctx: t }
- }
- }), C.$on(
- "click",
- /*next_page*/
- t[18]
- ), {
- c() {
- create_component(e.$$.fragment), i = space(), n = element("div"), s = element("canvas"), l = space(), h = element("div"), create_component(_.$$.fragment), c = space(), o = element("span"), r = text(
- /*currentPage*/
- t[14]
- ), T = text(" / "), S = text(
- /*numPages*/
- t[13]
- ), w = space(), create_component(C.$$.fragment), attr(n, "class", "pdf-canvas svelte-qxsbof"), set_style(
- n,
- "height",
- /*height*/
- t[1] + "px"
- ), attr(o, "class", "page-count svelte-qxsbof"), attr(h, "class", "button-row svelte-qxsbof");
- },
- m(b, k) {
- mount_component(e, b, k), insert(b, i, k), insert(b, n, k), append(n, s), t[22](s), insert(b, l, k), insert(b, h, k), mount_component(_, h, null), append(h, c), append(h, o), append(o, r), append(o, T), append(o, S), append(h, w), mount_component(C, h, null), P = !0;
- },
- p(b, k) {
- const F = {};
- k & /*gradio*/
- 2048 && (F.i18n = /*gradio*/
- b[11].i18n), e.$set(F), (!P || k & /*height*/
- 2) && set_style(
- n,
- "height",
- /*height*/
- b[1] + "px"
- );
- const x = {};
- k & /*$$scope*/
- 134217728 && (x.$$scope = { dirty: k, ctx: b }), _.$set(x), (!P || k & /*currentPage*/
- 16384) && set_data(
- r,
- /*currentPage*/
- b[14]
- ), (!P || k & /*numPages*/
- 8192) && set_data(
- S,
- /*numPages*/
- b[13]
- );
- const y = {};
- k & /*$$scope*/
- 134217728 && (y.$$scope = { dirty: k, ctx: b }), C.$set(y);
- },
- i(b) {
- P || (transition_in(e.$$.fragment, b), transition_in(_.$$.fragment, b), transition_in(C.$$.fragment, b), P = !0);
- },
- o(b) {
- transition_out(e.$$.fragment, b), transition_out(_.$$.fragment, b), transition_out(C.$$.fragment, b), P = !1;
- },
- d(b) {
- b && (detach(i), detach(n), detach(l), detach(h)), destroy_component(e, b), t[22](null), destroy_component(_), destroy_component(C);
- }
- };
-}
-function create_default_slot_3(t) {
- let e, i;
- return e = new PdfUploadText({}), {
- c() {
- create_component(e.$$.fragment);
- },
- m(n, s) {
- mount_component(e, n, s), i = !0;
- },
- i(n) {
- i || (transition_in(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component(e, n);
- }
- };
-}
-function create_default_slot_2(t) {
- let e;
- return {
- c() {
- e = text("⬅️");
- },
- m(i, n) {
- insert(i, e, n);
- },
- d(i) {
- i && detach(e);
- }
- };
-}
-function create_default_slot_1(t) {
- let e;
- return {
- c() {
- e = text("➡️");
- },
- m(i, n) {
- insert(i, e, n);
- },
- d(i) {
- i && detach(e);
- }
- };
-}
-function create_default_slot(t) {
- let e, i, n, s, l, h, _, c = (
- /*loading_status*/
- t[10] && create_if_block_1(t)
- );
- i = new BlockLabel({
- props: {
- show_label: (
- /*label*/
- t[8] !== null
- ),
- Icon: File$1,
- float: (
- /*value*/
- t[0] === null
- ),
- label: (
- /*label*/
- t[8] || "File"
- )
- }
- });
- const o = [create_if_block, create_else_block], r = [];
- function T(S, w) {
- return (
- /*_value*/
- S[12] ? 0 : 1
- );
- }
- return s = T(t), l = r[s] = o[s](t), {
- c() {
- c && c.c(), e = space(), create_component(i.$$.fragment), n = space(), l.c(), h = empty();
- },
- m(S, w) {
- c && c.m(S, w), insert(S, e, w), mount_component(i, S, w), insert(S, n, w), r[s].m(S, w), insert(S, h, w), _ = !0;
- },
- p(S, w) {
- /*loading_status*/
- S[10] ? c ? (c.p(S, w), w & /*loading_status*/
- 1024 && transition_in(c, 1)) : (c = create_if_block_1(S), c.c(), transition_in(c, 1), c.m(e.parentNode, e)) : c && (group_outros(), transition_out(c, 1, 1, () => {
- c = null;
- }), check_outros());
- const C = {};
- w & /*label*/
- 256 && (C.show_label = /*label*/
- S[8] !== null), w & /*value*/
- 1 && (C.float = /*value*/
- S[0] === null), w & /*label*/
- 256 && (C.label = /*label*/
- S[8] || "File"), i.$set(C);
- let P = s;
- s = T(S), s === P ? r[s].p(S, w) : (group_outros(), transition_out(r[P], 1, 1, () => {
- r[P] = null;
- }), check_outros(), l = r[s], l ? l.p(S, w) : (l = r[s] = o[s](S), l.c()), transition_in(l, 1), l.m(h.parentNode, h));
- },
- i(S) {
- _ || (transition_in(c), transition_in(i.$$.fragment, S), transition_in(l), _ = !0);
- },
- o(S) {
- transition_out(c), transition_out(i.$$.fragment, S), transition_out(l), _ = !1;
- },
- d(S) {
- S && (detach(e), detach(n), detach(h)), c && c.d(S), destroy_component(i, S), r[s].d(S);
- }
- };
-}
-function create_fragment(t) {
- let e, i;
- return e = new Block({
- props: {
- visible: (
- /*visible*/
- t[4]
- ),
- elem_id: (
- /*elem_id*/
- t[2]
- ),
- elem_classes: (
- /*elem_classes*/
- t[3]
- ),
- container: (
- /*container*/
- t[5]
- ),
- scale: (
- /*scale*/
- t[6]
- ),
- min_width: (
- /*min_width*/
- t[9]
- ),
- $$slots: { default: [create_default_slot] },
- $$scope: { ctx: t }
- }
- }), {
- c() {
- create_component(e.$$.fragment);
- },
- m(n, s) {
- mount_component(e, n, s), i = !0;
- },
- p(n, [s]) {
- const l = {};
- s & /*visible*/
- 16 && (l.visible = /*visible*/
- n[4]), s & /*elem_id*/
- 4 && (l.elem_id = /*elem_id*/
- n[2]), s & /*elem_classes*/
- 8 && (l.elem_classes = /*elem_classes*/
- n[3]), s & /*container*/
- 32 && (l.container = /*container*/
- n[5]), s & /*scale*/
- 64 && (l.scale = /*scale*/
- n[6]), s & /*min_width*/
- 512 && (l.min_width = /*min_width*/
- n[9]), s & /*$$scope, numPages, currentPage, height, canvasRef, gradio, _value, root, label, value, loading_status*/
- 134282627 && (l.$$scope = { dirty: s, ctx: n }), e.$set(l);
- },
- i(n) {
- i || (transition_in(e.$$.fragment, n), i = !0);
- },
- o(n) {
- transition_out(e.$$.fragment, n), i = !1;
- },
- d(n) {
- destroy_component(e, n);
- }
- };
-}
-function instance(t, e, i) {
- let { elem_id: n = "" } = e, { elem_classes: s = [] } = e, { visible: l = !0 } = e, { value: h = null } = e, { container: _ = !0 } = e, { scale: c = null } = e, { root: o } = e, { height: r = 500 } = e, { label: T } = e, { proxy_url: S } = e, { min_width: w = void 0 } = e, { loading_status: C } = e, { gradio: P } = e;
- pdfjsLib.GlobalWorkerOptions.workerSrc = "https://cdn.bootcss.com/pdf.js/3.11.174/pdf.worker.js";
- let b = h, k = b, F, x = 1, y = 1, p;
- async function E() {
- i(12, b = null), await tick(), P.dispatch("change");
- }
- async function $({ detail: G }) {
- i(0, h = G), await tick(), P.dispatch("change"), P.dispatch("upload");
- }
- async function M(G) {
- F = await pdfjsLib.getDocument(G.url).promise, i(13, x = F.numPages), m();
- }
- function m() {
- F.getPage(y).then((G) => {
- const I = p.getContext("2d");
- I.clearRect(0, 0, p.width, p.height);
- let B = G.getViewport({ scale: 1 }), ee = r / B.height;
- B = G.getViewport({ scale: ee });
- const Y = { canvasContext: I, viewport: B };
- i(15, p.width = B.width, p), i(15, p.height = B.height, p), G.render(Y);
- });
- }
- function N() {
- y >= x || (i(14, y++, y), m());
- }
- function D() {
- y != 1 && (i(14, y--, y), m());
- }
- function X(G) {
- binding_callbacks[G ? "unshift" : "push"](() => {
- p = G, i(15, p);
- });
- }
- return t.$$set = (G) => {
- "elem_id" in G && i(2, n = G.elem_id), "elem_classes" in G && i(3, s = G.elem_classes), "visible" in G && i(4, l = G.visible), "value" in G && i(0, h = G.value), "container" in G && i(5, _ = G.container), "scale" in G && i(6, c = G.scale), "root" in G && i(7, o = G.root), "height" in G && i(1, r = G.height), "label" in G && i(8, T = G.label), "proxy_url" in G && i(20, S = G.proxy_url), "min_width" in G && i(9, w = G.min_width), "loading_status" in G && i(10, C = G.loading_status), "gradio" in G && i(11, P = G.gradio);
- }, t.$$.update = () => {
- t.$$.dirty & /*height*/
- 2 && i(1, r = r || 500), t.$$.dirty & /*value, root, proxy_url*/
- 1048705 && i(12, b = normalise_file(h, o, S)), t.$$.dirty & /*old_value, _value, gradio*/
- 2103296 && JSON.stringify(k) != JSON.stringify(b) && (b && M(b), i(21, k = b), P.dispatch("change"));
- }, [
- h,
- r,
- n,
- s,
- l,
- _,
- c,
- o,
- T,
- w,
- C,
- P,
- b,
- x,
- y,
- p,
- E,
- $,
- N,
- D,
- S,
- k,
- X
- ];
-}
-class Index extends SvelteComponent {
- constructor(e) {
- super(), init(this, e, instance, create_fragment, safe_not_equal, {
- elem_id: 2,
- elem_classes: 3,
- visible: 4,
- value: 0,
- container: 5,
- scale: 6,
- root: 7,
- height: 1,
- label: 8,
- proxy_url: 20,
- min_width: 9,
- loading_status: 10,
- gradio: 11
- });
- }
-}
-export {
- Index as I,
- require$$5$1 as r
-};
diff --git a/spaces/frncscp/bullerengue/musika/22kHz/musika.py b/spaces/frncscp/bullerengue/musika/22kHz/musika.py
deleted file mode 100644
index d77c1791015ef544e7c1345220e53fcee51dff87..0000000000000000000000000000000000000000
--- a/spaces/frncscp/bullerengue/musika/22kHz/musika.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from parse_test import parse_args
-from models import Models_functions
-from utils import Utils_functions
-
-if __name__ == "__main__":
-
- # parse args
- args = parse_args()
-
- # initialize networks
- M = Models_functions(args)
- models_ls = M.get_networks()
-
- # test musika
- U = Utils_functions(args)
- U.render_gradio(models_ls, train=False)
diff --git a/spaces/ganning/asl-gloss/app.py b/spaces/ganning/asl-gloss/app.py
deleted file mode 100644
index a1b2018d53adff2c0ff09594b5e20268b6a8752d..0000000000000000000000000000000000000000
--- a/spaces/ganning/asl-gloss/app.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import pandas as pd
-import numpy as np
-import string
-from keras.models import Sequential
-from keras.layers import Dense, LSTM, Embedding, RepeatVector
-from keras.preprocessing.text import Tokenizer
-from keras.callbacks import ModelCheckpoint
-from keras.preprocessing.sequence import pad_sequences
-from keras.models import load_model
-from tensorflow.keras import optimizers
-from sklearn.model_selection import train_test_split
-import gradio as gr
-import joblib
-
-# function to build a tokenizer
-def tokenization(lines):
- tokenizer = Tokenizer()
- tokenizer.fit_on_texts(lines)
- return tokenizer
-
-# prepare english tokenizer
-en_tokenizer = joblib.load('en_tokenizer.pkl')
-en_vocab_size = joblib.load('en_vocab_size.pkl')
-
-en_length = 8
-print('English Vocabulary Size: %d' % en_vocab_size)
-
-gloss_tokenizer = joblib.load('gloss_tokenizer.pkl')
-gloss_vocab_size = joblib.load('gloss_vocab_size.pkl')
-
-gloss_length = 8
-print('Gloss Vocabulary Size: %d' % gloss_vocab_size)
-
-# encode and pad sequences
-def encode_sequences(tokenizer, length, lines):
- # integer encode sequences
- seq = tokenizer.texts_to_sequences(lines)
- # pad sequences with 0 values
- seq = pad_sequences(seq, maxlen=length, padding='post')
- return seq
-
-
-model = load_model('en_to_gloss_model')
-
-
-def get_word(n, tokenizer):
- for word, index in tokenizer.word_index.items():
- if index == n:
- return word
- return None
-
-def main(text):
-    testX = encode_sequences(en_tokenizer, en_length, [text])
- preds = model.predict(testX.reshape(testX.shape[0], testX.shape[1], 1))
- y_pred = np.argmax(preds, axis=-1)
- preds_text = []
- for i in y_pred:
- temp = []
- for j in range(len(i)):
- t = get_word(i[j], gloss_tokenizer)
- if j > 0:
- if (t == get_word(i[j-1], gloss_tokenizer)) or (t == None):
- temp.append('')
- else:
- temp.append(t)
- else:
- if(t == None):
- temp.append('')
- else:
- temp.append(t)
-
- preds_text.append(' '.join(temp))
-
- return preds_text
-
-
-gr.Interface(
- fn=main,
- inputs=[gr.inputs.Textbox(lines=5, placeholder="English here...")],
- outputs=["text"],
- theme="huggingface").launch(debug=False, share=False)
\ No newline at end of file
diff --git a/spaces/ggwvits/vits-uma-genshin-honkai/text/cleaners.py b/spaces/ggwvits/vits-uma-genshin-honkai/text/cleaners.py
deleted file mode 100644
index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000
--- a/spaces/ggwvits/vits-uma-genshin-honkai/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
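In the upstream tacotron-style repos this selection step lives outside cleaners.py, in a companion text module; as a rough sketch only (the helper name and lookup below are assumptions, not part of this file), resolving a comma-delimited cleaner string to the functions defined below might look like:

# Sketch: apply a comma-delimited list of cleaner names in order.
# Hypothetical helper, not part of cleaners.py.
def _apply_cleaners(text, cleaner_names):
    for name in cleaner_names.split(','):
        cleaner = globals().get(name.strip())
        if cleaner is None:
            raise ValueError('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text

# e.g. _apply_cleaners('Dr. Smith   lives nearby.', 'english_cleaners')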
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def lowercase(text):
- return text.lower()
-
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text!='':
- text+=' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil','pau']:
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
- else:
- continue
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
- a2_next=-1
- else:
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
-        if i<len(marks):
-            text += unidecode(marks[i]).replace(' ', '')
diff --git a/spaces/hf-task-exploration/ExploreACMnaacl/posts/model_exploration.py b/spaces/hf-task-exploration/ExploreACMnaacl/posts/model_exploration.py
deleted file mode 100644
index 64e39a784c28a624e69b4d96c7ab33976bb0dd46..0000000000000000000000000000000000000000
--- a/spaces/hf-task-exploration/ExploreACMnaacl/posts/model_exploration.py
+++ /dev/null
@@ -1,319 +0,0 @@
-import json
-import random
-import sys
-
-import numpy as np
-import pandas as pd
-import streamlit as st
-# from transformers import AutoTokenizer, AutoModelForSequenceClassification
-from transformers import pipeline
-
-title = "Model Exploration"
-description = "Comparison of hate speech detection models"
-date = "2022-01-26"
-thumbnail = "images/robot.png"
-
-__HATE_DETECTION = """
-Once the data has been collected using the definitions identified for the
-task, you can start training your model. At training, the model takes in
-the data with labels and learns the associated context in the input data
-for each label. Depending on the task design, the labels may be binary like
-'hateful' and 'non-hateful' or multiclass like 'neutral', 'offensive', and
-'attack'.
-
-When presented with a new input string, the model then predicts the
-likelihood that the input is classified as each of the available labels and
-returns the label with the highest likelihood as well as how confident the
-model is in its selection using a score from 0 to 1.
-
-Neural models such as transformers are frequently trained as general
-language models and then fine-tuned on specific classification tasks.
-These models can vary in their architectures and optimization
-algorithms, sometimes resulting in very different outputs for the same
-input text.
-
-The models used below include:
-- [RoBERTa trained on FRENK dataset](https://huggingface.co/classla/roberta-base-frenk-hate)
-- [RoBERTa trained on Twitter Hate Speech](https://huggingface.co/cardiffnlp/twitter-roberta-base-hate)
-- [DeHateBERT model (trained on Twitter and StormFront)](https://huggingface.co/Hate-speech-CNERG/dehatebert-mono-english)
-- [RoBERTa trained on 11 English hate speech datasets](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r1-target)
-- [RoBERTa trained on 11 English hate speech datasets and Round 1 of the Dynamically Generated Hate Speech Dataset](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r2-target)
-- [RoBERTa trained on 11 English hate speech datasets and Rounds 1 and 2 of the Dynamically Generated Hate Speech Dataset](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r3-target)
-- [RoBERTa trained on 11 English hate speech datasets and Rounds 1, 2, and 3 of the Dynamically Generated Hate Speech Dataset](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target)
-"""
-
-__HATECHECK = """
-[Röttger et al. (2021)](https://aclanthology.org/2021.acl-long.4.pdf)
-developed a list of 3,901 test cases for hate speech detection models called
-HateCheck. HateCheck provides a number of templates along with placeholders for
-identity categories and hateful terms along with labels indicating whether a
-model should or should not categorize the instance as hate speech. For each
-case, they created several examples with different
-identity attributes to test models' abilities to detect hate speech towards
-a range of groups of people. Additionally, they used more difficult
-linguistic contexts such as adding negation or more nuanced words to try to fool the
-model. See some of their examples using the button or try to make
-your own examples to test the models in the tools below.
-
-*** Warning: these examples may include hateful and violent content as
-well as slurs and other offensive language ***
-"""
-
-__RANKING = """
-When models process a given input, they calculate the probability of
-that input being labeled with each of the possible labels (in binary
-cases for example, either 'hateful' or 'not hateful'). The label with
-the highest probability is returned. If we test multiple input sentences
-for a given model, we can see which input sentences have the
-highest probabilities, indicating which examples the model is most
-confident in classifying.
-
-Try comparing different input sentences for a given model
-using the tool below.
-"""
-
-__COMPARISON = """
-Depending on their training data and parameters, models can return very
-different outputs for the same input. Knowing how models differ in
-their behavior can help with choosing an appropriate model for your
-given use case.
-
-Additionally, models trained on one kind of data can perform very
-differently when tested on novel data. To show the models' performance
-in a variety of settings, we also show the results of each model on
-four datasets: the entirety of the [HateCheck Dataset](https://github.com/paul-rottger/hatecheck-data),
-the [Dynamically Generated Hate Speech Dataset](https://github.com/bvidgen/Dynamically-Generated-Hate-Speech-Dataset),
-the [Hateful Memes Dataset](https://huggingface.co/datasets/limjiayi/hateful_memes_expanded)
-(without the images), and a 2000-instance subsection of the
-[Open Subtitles](https://opus.nlpl.eu/OpenSubtitles-v2018.php)
-(English) Dataset. While the first three are hate speech datasets and have
-labels for whether or not each instance is hateful, the Open Subtitles dataset
-contains movie subtitles and is not labeled for instances of hate.
-
-Try comparing two different models' output for the same comparison
-sentence using the tool below.
-"""
-
-__MODELS = [
- "classla/roberta-base-frenk-hate",
- "cardiffnlp/twitter-roberta-base-hate",
- "Hate-speech-CNERG/dehatebert-mono-english",
- "facebook/roberta-hate-speech-dynabench-r1-target",
- "facebook/roberta-hate-speech-dynabench-r2-target",
- "facebook/roberta-hate-speech-dynabench-r3-target",
- "facebook/roberta-hate-speech-dynabench-r4-target",
-]
-
-autopop_ranking = [
- [
- "I don't hate black people.",
- "Black people should die.",
- "My friend likes pancakes.",
- ],
- [
- "Those dirty redskins are all drunks.",
- "I can't stand Chinese food. It's too spicy.",
- "I lived near many Chinese people and I hated every single one of them.",
- ],
- [
- "Stop saying that Mexicans don't belong in our country!",
- "We walked along the dyke down the road.",
- "Ah shit, I fucked up.",
- ],
-]
-
-# Creates the forms for receiving multiple inputs to compare for a single
-# model or one input to compare for two models
-def run_article():
- st.markdown("# Making a Hate Speech Detection Model")
- with st.expander("Hate Speech Detection Models", expanded=False):
- st.markdown(__HATE_DETECTION)
- hc_path = "posts/resources/"
- hc_pholders = json.load(
- open(hc_path + "template_placeholders.json", encoding="utf-8")
- )
- hc_templates = json.load(
- open(hc_path + "hatecheck_category_templates.json", encoding="utf-8")
- )
- hc_info = json.load(
- open(hc_path + "hatecheck_category_info.json", encoding="utf-8")
- )
- hc_cats = [""] + list(hc_info.keys())
-
- st.markdown("## Testing Models' Behavior")
- with st.expander("HateCheck Examples", expanded=False):
- st.markdown(__HATECHECK)
- category = st.selectbox(
- "Select a category of examples from HateCheck", hc_cats, key="hc_cat_select"
- )
- if category:
- with st.form(key="hate_check"):
- hc_cat = hc_info[category]
- templates = []
- names = []
- for hc_temp in hc_cat:
- templates.append(hc_temp)
- names.append(hc_cat[hc_temp]["name"])
- selected_names = st.multiselect(
- "Select one or more HateCheck templates to generate examples for",
- names,
- key="hc_temp_multiselect",
- )
- num_exs = st.number_input(
- "Select a number of examples to generate for each selected template",
- min_value=1,
- max_value=5,
- value=3,
- )
- if st.form_submit_button(label="Generate Examples"):
- for name in selected_names:
- index = names.index(name)
- template = templates[index]
- examples = generate_hc_ex(
- hc_templates[template], hc_pholders, num_exs
- )
- st.header(name)
- st.subheader("Label: " + hc_cat[template]["value"])
- st.caption(hc_cat[template]["desc"])
- for num in range(num_exs):
- ex = examples[num]
- st.write("Example #" + str(num + 1) + ": " + ex)
-
- st.markdown("## Model Output Ranking")
- with st.expander("Model Output Ranking Tool", expanded=False):
- st.markdown(__RANKING)
- with st.form(key="ranking"):
- model_name = st.selectbox(
- "Select a model to test",
- __MODELS,
- )
- # the "placeholder" keyword argument was added in Streamlit v1.2,
- # but the Streamlit version available on Spaces currently only goes up to v1.0
- input_1 = st.text_input(
- "Input 1",
- help="Try a phrase like 'We shouldn't let [IDENTITY] suffer.'",
- # placeholder="We shouldn't let [IDENTITY] suffer."
- )
- input_2 = st.text_input(
- "Input 2",
- help="Try a phrase like 'I'd rather die than date [IDENTITY].'",
- # placeholder="I'd rather die than date [IDENTITY]."
- )
- input_3 = st.text_input(
- "Input 3",
- help="Try a phrase like 'Good morning'",
- # placeholder="Good morning."
- )
- autopop = st.checkbox(
- "Choose examples for me",
- key="rank_autopop_ckbx",
- help="Check this box to run the model with 3 preselected sentences.",
- )
- if st.form_submit_button(label="Rank inputs"):
- if autopop:
- rank_inputs = random.choice(autopop_ranking)
- else:
- rank_inputs = [input_1, input_2, input_3]
- sys.stderr.write("\n" + str(rank_inputs) + "\n")
- results = run_ranked(model_name, rank_inputs)
- st.dataframe(results)
-
- st.markdown("## Model Comparison")
- with st.expander("Model Comparison Tool", expanded=False):
- st.markdown(__COMPARISON)
- with st.form(key="comparison"):
- model_name_1 = st.selectbox(
- "Select a model to compare",
- __MODELS,
- key="compare_model_1",
- )
- model_name_2 = st.selectbox(
- "Select another model to compare",
- __MODELS,
- key="compare_model_2",
- )
- autopop = st.checkbox(
- "Choose an example for me",
- key="comp_autopop_ckbx",
- help="Check this box to compare the models with a preselected sentence.",
- )
- input_text = st.text_input("Comparison input")
- if st.form_submit_button(label="Compare models"):
- if autopop:
- input_text = random.choice(random.choice(autopop_ranking))
- results = run_compare(model_name_1, model_name_2, input_text)
- st.write("### Showing results for: " + input_text)
- st.dataframe(results)
- outside_ds = ["hatecheck", "dynabench", "hatefulmemes", "opensubtitles"]
- name_1_short = model_name_1.split("/")[1]
- name_2_short = model_name_2.split("/")[1]
- for calib_ds in outside_ds:
- ds_loc = "posts/resources/charts/" + calib_ds + "/"
- images, captions = [], []
- for model in [name_1_short, name_2_short]:
- images.append(ds_loc + model + "_" + calib_ds + ".png")
- captions.append("Counts of dataset instances by hate score.")
- st.write("#### Model performance comparison on " + calib_ds)
- st.image(images, captions)
-
-
-# if model_name_1 == "Hate-speech-CNERG/dehatebert-mono-english":
-# st.image("posts/resources/dehatebert-mono-english_calibration.png")
-# elif model_name_1 == "cardiffnlp/twitter-roberta-base-hate":
-# st.image("posts/resources/twitter-roberta-base-hate_calibration.png")
-# st.write("Calibration of Model 2")
-# if model_name_2 == "Hate-speech-CNERG/dehatebert-mono-english":
-# st.image("posts/resources/dehatebert-mono-english_calibration.png")
-# elif model_name_2 == "cardiffnlp/twitter-roberta-base-hate":
-# st.image("posts/resources/twitter-roberta-base-hate_calibration.png")
-
-
-# Takes in a Hate Check template and placeholders and generates the given
-# number of random examples from the template, inserting a random instance of
-# an identity category if there is a placeholder in the template
-def generate_hc_ex(template, placeholders, gen_num):
- sampled = random.sample(template, gen_num)
- ph_cats = list(placeholders.keys())
- for index in range(len(sampled)):
- sample = sampled[index]
- for ph_cat in ph_cats:
- if ph_cat in sample:
- insert = random.choice(placeholders[ph_cat])
- sampled[index] = sample.replace(ph_cat, insert).capitalize()
- return sampled
-
-
-# Runs the received input strings through the given model and returns
-# all scores for all possible labels as a DataFrame
-def run_ranked(model, input_list):
- classifier = pipeline("text-classification", model=model, return_all_scores=True)
- output = {}
- results = classifier(input_list)
- for result in results:
- for index in range(len(result)):
- label = result[index]["label"]
- score = result[index]["score"]
- if label in output:
- output[label].append(score)
- else:
- new_out = [score]
- output[label] = new_out
- return pd.DataFrame(output, index=input_list)
-
-
-# Takes in two model names and an input string and returns the output of
-# both models for that input
-def run_compare(name_1, name_2, text):
- classifier_1 = pipeline("text-classification", model=name_1)
- result_1 = classifier_1(text)
- out_1 = {}
- out_1["Model"] = name_1
- out_1["Label"] = result_1[0]["label"]
- out_1["Score"] = result_1[0]["score"]
- classifier_2 = pipeline("text-classification", model=name_2)
- result_2 = classifier_2(text)
- out_2 = {}
- out_2["Model"] = name_2
- out_2["Label"] = result_2[0]["label"]
- out_2["Score"] = result_2[0]["score"]
- return [out_1, out_2]
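Because `run_ranked` and `run_compare` above are plain functions, they can also be driven without the Streamlit UI. A rough usage sketch, with model names taken from `__MODELS` and example sentences from `autopop_ranking`:

```python
# Rank three inputs with a single model: returns a DataFrame with one row per
# input sentence and one column per label score (see run_ranked above)
ranking = run_ranked(
    "facebook/roberta-hate-speech-dynabench-r4-target",
    ["I don't hate black people.", "Black people should die.", "My friend likes pancakes."],
)
print(ranking)

# Compare two models on the same sentence: returns a list of two dicts holding
# the top label and score for each model (see run_compare above)
comparison = run_compare(
    "classla/roberta-base-frenk-hate",
    "cardiffnlp/twitter-roberta-base-hate",
    "We shouldn't let anyone suffer.",
)
print(pd.DataFrame(comparison))
```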
diff --git a/spaces/hhalim/WikipediaAIDataScience/app.py b/spaces/hhalim/WikipediaAIDataScience/app.py
deleted file mode 100644
index 2555189dd8c4611ff5a2df6bf73c8ce7412df062..0000000000000000000000000000000000000000
--- a/spaces/hhalim/WikipediaAIDataScience/app.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import spacy
-import wikipediaapi
-import wikipedia
-from wikipedia.exceptions import DisambiguationError
-from transformers import TFAutoModel, AutoTokenizer
-import numpy as np
-import pandas as pd
-import faiss
-import gradio as gr
-
-try:
- nlp = spacy.load("en_core_web_sm")
-except OSError:
- # model not installed yet; download it once, then load it
- spacy.cli.download("en_core_web_sm")
- nlp = spacy.load("en_core_web_sm")
-
-wh_words = ['what', 'who', 'how', 'when', 'which']
-def get_concepts(text):
- text = text.lower()
- doc = nlp(text)
- concepts = []
- for chunk in doc.noun_chunks:
- if chunk.text not in wh_words:
- concepts.append(chunk.text)
- return concepts
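`get_concepts` simply collects the noun chunks spaCy finds (minus bare question words); these later become Wikipedia search queries. A small illustration, assuming `en_core_web_sm` is available:

```python
print(get_concepts("Who invented the telephone?"))
# e.g. ['the telephone'] -- the exact chunks depend on the spaCy model version
```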
-
-def get_passages(text, k=100):
- doc = nlp(text)
- passages = []
- passage_len = 0
- passage = ""
- sents = list(doc.sents)
- for i in range(len(sents)):
- sen = sents[i]
- passage_len+=len(sen)
- if passage_len >= k:
- passages.append(passage)
- passage = sen.text
- passage_len = len(sen)
- continue
-
- elif i==(len(sents)-1):
- passage+=" "+sen.text
- passages.append(passage)
- passage = ""
- passage_len = 0
- continue
-
- passage+=" "+sen.text
- return passages
-
-def get_dicts_for_dpr(concepts, n_results=20, k=100):
- dicts = []
- for concept in concepts:
- wikis = wikipedia.search(concept, results=n_results)
- print(concept, "No of Wikis: ",len(wikis))
- for wiki in wikis:
- try:
- html_page = wikipedia.page(title = wiki, auto_suggest = False)
- except DisambiguationError:
- continue
-
- htmlResults=html_page.content
-
- passages = get_passages(htmlResults, k=k)
- for passage in passages:
- i_dicts = {}
- i_dicts['text'] = passage
- i_dicts['title'] = wiki
- dicts.append(i_dicts)
- return dicts
-
-passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
-query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
-p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2")
-q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-2_H-128_A-2")
-
-def get_title_text_combined(passage_dicts):
- res = []
- for p in passage_dicts:
- res.append(tuple((p['title'], p['text'])))
- return res
-
-def extracted_passage_embeddings(processed_passages, max_length=156):
- passage_inputs = p_tokenizer.batch_encode_plus(
- processed_passages,
- add_special_tokens=True,
- truncation=True,
- padding="max_length",
- max_length=max_length,
- return_token_type_ids=True
- )
- passage_embeddings = passage_encoder.predict([np.array(passage_inputs['input_ids']),
- np.array(passage_inputs['attention_mask']),
- np.array(passage_inputs['token_type_ids'])],
- batch_size=64,
- verbose=1)
- return passage_embeddings
-
-def extracted_query_embeddings(queries, max_length=64):
- query_inputs = q_tokenizer.batch_encode_plus(
- queries,
- add_special_tokens=True,
- truncation=True,
- padding="max_length",
- max_length=max_length,
- return_token_type_ids=True
- )
- query_embeddings = query_encoder.predict([np.array(query_inputs['input_ids']),
- np.array(query_inputs['attention_mask']),
- np.array(query_inputs['token_type_ids'])],
- batch_size=1,
- verbose=1)
- return query_embeddings
-
-#Wikipedia API:
-
-def get_pagetext(page):
- # strip tab characters from the raw page text
- s = str(page).replace("\t", "")
- return s
-
-def get_wiki_summary(search):
- wiki_wiki = wikipediaapi.Wikipedia('en')
- page = wiki_wiki.page(search)
-
- isExist = page.exists()
- if not isExist:
- return isExist, "Not found", "Not found", "Not found", "Not found"
-
- pageurl = page.fullurl
- pagetitle = page.title
- pagesummary = page.summary[0:60]
- pagetext = get_pagetext(page.text)
-
- # Titles of pages that link to this page
- linklist = "".join(title + " , " for title in page.backlinks)
-
- # Categories this page belongs to
- categorylist = "".join(title + " , " for title in page.categories)
-
- # Titles of pages this page links to
- linklist2 = "".join(title + " , " for title in page.links)
-
- sections = page.sections
-
- ex_dic = {
- 'Entity' : ["URL","Title","Summary", "Text", "Backlinks", "Links", "Categories"],
- 'Value': [pageurl, pagetitle, pagesummary, pagetext, linklist,linklist2, categorylist ]
- }
-
- df = pd.DataFrame(ex_dic)
-
- return df
-
-def search(question):
- concepts = get_concepts(question)
- print("concepts: ",concepts)
- dicts = get_dicts_for_dpr(concepts, n_results=1)
- lendicts = len(dicts)
- print("dicts len: ", lendicts)
- if lendicts == 0:
- return pd.DataFrame()
- processed_passages = get_title_text_combined(dicts)
- passage_embeddings = extracted_passage_embeddings(processed_passages)
- query_embeddings = extracted_query_embeddings([question])
- faiss_index = faiss.IndexFlatL2(128)
- faiss_index.add(passage_embeddings.pooler_output)
- prob, index = faiss_index.search(query_embeddings.pooler_output, k=lendicts)
- return pd.DataFrame([dicts[i] for i in index[0]])
-
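The `search` helper above wires the whole retrieval chain together (noun-chunk extraction, Wikipedia lookup, DPR encoding, FAISS nearest-neighbour search), so a single call is enough to try it. A minimal sketch, assuming the spaCy model and the two DPR checkpoints above have downloaded successfully:

```python
# Returns a DataFrame of retrieved passages (columns "text" and "title"),
# ordered by L2 distance between the question and passage embeddings
results = search("Who invented the telephone?")
print(results.head())
```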
-# AI UI SOTA - Gradio blocks with UI formatting, and event driven UI
-with gr.Blocks() as demo: # Block documentation on event listeners, start here: https://gradio.app/blocks_and_event_listeners/
- gr.Markdown("
-
-Future plans or business prospect:
-
-We intend to promote the search and find professional partners for data exchange, but first we need to be more visible on search engines. Helping to promote the brand in any way is the best route to more traffic and recognition. We intend to do the same for video search and advertising, but we need to give them a clearer focus and well-defined search strategies.
-
-Praised by:
-
-We are very pleased to see the search being used. This search is a great alternative to established search engines such as Google; its advantage is that it combines relevant results with fast response times thanks to the breadth of its index. It is very useful, cheap, and already popular with the community. We are also very happy to see its success; it is the only one we recommend to people who do not want to waste time searching for what they want. We congratulate you!
-
-Great! Your site is awesome! I have read many articles on your blog and I like what you have to say. I'm definitely going to be following you. The one thing that I would like to point out is that when I first entered your website I saw that the title contained the word "octuplet", whereas now it doesn't. Anyway, keep up the great work. Looking forward to reading more.
-
-4fefd39f24
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Digitalb Conax Keys [TOP].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Digitalb Conax Keys [TOP].md
deleted file mode 100644
index cb24b8c1c642583e5fcbfbb35167faa624a5eca2..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Digitalb Conax Keys [TOP].md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-is it possible to extract the rsa key from the digitalb conax card? how do i want to know i am looking for my pm dear ... how can i do with java rsa key encryption from the card using pm
-I don't know if you can extract the rsa key from the digitalb conax card, but I know it contains the RSA key.
-Then you can use it to encrypt the data.
-If you use java, you can use JasperReports to do your work.
-You can then encrypt the data stored in the card with the rsa key.
-I hope this clarifies things.
-Regards,
-Abraham 8a78ff9644
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Full !!EXCLUSIVE!! CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 Fixed (64 Bit) [Chin.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Full !!EXCLUSIVE!! CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 Fixed (64 Bit) [Chin.md
deleted file mode 100644
index 83b88e2fe6794299c3360f717500e11795974c2e..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Full !!EXCLUSIVE!! CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 Fixed (64 Bit) [Chin.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin: A Review
-
If you are looking for a powerful and versatile graphic design software, you might want to consider FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin. This software package offers a complete set of tools for creating illustrations, logos, brochures, newsletters, advertisements, booklets, web graphics and more. It is compatible with more than 100 file formats, including AI, PSD, PDF, JPG, PNG, EPS, TIFF and others.
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin
In this article, we will review some of the features and benefits of FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin and why it is a good choice for graphic designers of all levels.
-
What's Included in FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin consists of several applications that work together to provide a comprehensive graphic design solution. These applications are:
-
-
CorelDRAW X6: This is the main vector illustration and page layout application that allows you to create and edit vector graphics, text, shapes and effects.
-
Corel PHOTO-PAINT X6: This is the professional image-editing application that allows you to enhance and retouch photos, apply filters and effects, and work with layers and masks.
-
Corel PowerTRACE X6: This is the accurate bitmap-to-vector tracing tool that allows you to convert raster images into editable vector graphics.
-
Corel Website Creator X6: This is the powerful do-it-yourself website design software that allows you to create and manage websites without coding.
-
Corel CAPTURE X6: This is the simple one-click screen capture utility that allows you to capture images from your screen.
-
Corel CONNECT X6: This is the instant content finder that allows you to locate content on your computer, local network and websites.
-
PhotoZoom Pro 2: This is the convenient PHOTO-PAINT plug-in that allows you to enlarge digital images without losing quality.
-
ConceptShare: This is the interactive online collaboration tool that allows you to share your designs and feedback with colleagues and clients in real time.
-
-
What's New in FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin has several improvements and enhancements over the previous versions, such as:
-
-
Increased productivity: The software now has full support for 64-bit processors, enabling faster processing of large files and images. It also has improved support for multithreading for copy/paste large objects, export and print multiple documents.
-
OpenType support and work with text: The software now has access to many typographic features of OpenType fonts, such as ligatures, contextual and stylistic alternates, fractions, calligraphic characters and more. It also has a new docker Text Properties that combines most of the text settings in a convenient way.
-
The new \"face\" of docker Object Properties: The docker Object Properties has been redesigned to include more basic settings that are associated with new styles.
-
New content: The software comes with over 10,000 high-quality images, fonts, clipart, templates and more to enrich your designs.
-
New Vector Shaping tool: The software has a new tool that allows you to create complex shapes with curves and nodes.
-
New Custom Color Harmonies: The software has a new feature that allows you to create custom color schemes based on color theory rules.
-
New Smart Carver: The software has a new feature that allows you to remove unwanted objects from photos without affecting the background.
-
-
Why Choose FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin is a great graphic design software for several reasons:
-
-
It is easy to use: The software has an intuitive user interface that makes it easy to navigate and access the tools and features you need.
-
It is versatile: The software can handle any graphic design project, from logos and signs to web graphics and billboards.
-
It is compatible: The software can work with a wide range of file formats and devices, ensuring maximum compatibility with other applications and platforms.
-
It is affordable: The software offers a lot of value for money, as it includes several applications and content in one package.
-
It is reliable: The software has been around for more than 25 years and has a loyal user base that trusts its quality and performance.
-
-
How to Download and Install FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
If you want to try FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin for yourself, you can download it from the official website or from other trusted sources online. You will need to register an account with Corel or use an existing one to access the download link.
-
To install the software, you will need to follow these steps:
-
-
-
Extract the downloaded file using WinRAR or other software.
-
Run the setup.exe file as administrator.
-
Select your language and agree to the terms and conditions.
-
Select your installation type: typical or custom.
-
Select your installation folder and click install.
-
Wait for the installation process to finish.
-
Run the keygen.exe file as administrator.
-
Select CorelDRAW Graphics Suite X6 from the product list.
-
Copy the serial number from the keygen window.
-
Paste it into the activation window of the software.
-
Select phone activation option.
-
Copy the installation code from the software window.
-
Paste it into the keygen window.
-
Copy the activation code from the keygen window.
-
Paste it into the software window.
-
Click continue or finish to complete the activation process.
-
How to Use FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin is easy to use, thanks to its intuitive user interface and built-in help, training videos and templates. You can access the different applications from the main menu or from the desktop icons. You can also use Corel CONNECT X6 to find and manage your content quickly.
-
To start a new project, you can choose from a variety of templates or create your own document from scratch. You can use CorelDRAW X6 to draw and edit vector graphics, text, shapes and effects. You can use Corel PHOTO-PAINT X6 to enhance and retouch photos, apply filters and effects, and work with layers and masks. You can use Corel PowerTRACE X6 to convert raster images into editable vector graphics. You can use Corel Website Creator X6 to create and manage websites without coding. You can use Corel CAPTURE X6 to capture images from your screen.
-
To save and share your project, you can export it to a wide range of file formats, such as AI, PSD, PDF, JPG, PNG, EPS, TIFF and more. You can also use ConceptShare to collaborate online with colleagues and clients in real time.
-
What are the Benefits of FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin has many benefits for graphic designers of all levels, such as:
-
-
It is versatile: You can handle any graphic design project, from logos and signs to web graphics and billboards.
-
It is compatible: You can work with a wide range of file formats and devices, ensuring maximum compatibility with other applications and platforms.
-
It is affordable: You get a lot of value for money, as it includes several applications and content in one package.
-
It is reliable: You can trust its quality and performance, as it has been around for more than 25 years and has a loyal user base.
-
It is productive: You can work faster and easier, thanks to its 64-bit support, multithreading, OpenType support, new tools and features.
-
-
Conclusion
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin is a powerful and versatile graphic design software that can help you create professional results with confidence. It offers a complete set of tools for creating illustrations, logos, brochures, newsletters, advertisements, booklets, web graphics and more. It is compatible with more than 100 file formats, including AI, PSD, PDF, JPG, PNG, EPS, TIFF and others.
-
If you want to try FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin for yourself, you can download it from the official website or from other trusted sources online. You will need to register an account with Corel or use an existing one to access the download link.
-
To install the software, you will need to follow the steps in the previous section of this article.
-
We hope this article has been helpful for you and has given you some insights into FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin.
-
How to Use the Tools and Features of FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin has a variety of tools and features that can help you create stunning designs with ease and efficiency. Here are some of the most useful ones:
-
-
Vector Shaping tool: This tool allows you to create complex shapes with curves and nodes. You can use it to draw freehand shapes, edit existing shapes, or convert bitmap images into vector shapes.
-
Custom Color Harmonies: This feature allows you to create custom color schemes based on color theory rules. You can use it to adjust the colors of your design, create variations, or apply different color modes.
-
Smart Carver: This feature allows you to remove unwanted objects from photos without affecting the background. You can use it to crop, resize, or recompose your photos.
-
OpenType support: This feature allows you to access many typographic features of OpenType fonts, such as ligatures, contextual and stylistic alternates, fractions, calligraphic characters and more. You can use it to enhance the appearance and readability of your text.
-
Styles and Docker: This feature allows you to manage styles and colors easily with property dockers and convenient features, such as Style Sets and Color Harmonies. You can use it to apply consistent formatting and effects to your objects and text.
-
-
What are the Pros and Cons of FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin has many pros and cons that you should consider before using it. Here are some of them:
-
-
Pros
Cons
-
It is easy to use
It may not be compatible with some newer file formats or devices
-
It is versatile
It may have some bugs or glitches
-
It is compatible
It may require a lot of disk space and memory
-
It is affordable
It may not have some advanced features or updates that other software have
-
It is reliable
It may not be supported by Corel in the future
-
It is productive
It may not suit your personal preferences or needs
-
-
Tips and Tricks for Using FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin?
-
If you want to get the most out of FULL CorelDRAW Graphics Suite X6 16.4.0.1280 SP4 fixed (64 bit) [Chin, here are some tips and tricks that can help you:
-
-
Use shortcuts: You can use keyboard shortcuts to perform common tasks faster and easier.
-
Use templates: You can use templates to start your project quickly and easily.
-
Use guides and grids: You can use guides and grids to align and position your objects precisely.
-
Use layers and groups: You can use layers and groups to organize and manage your objects better.
-
Use effects and filters: You can use effects and filters to add interest and realism to your objects and photos.
-
Use online resources: You can use online resources such as tutorials, forums, blogs, galleries, etc., to learn more about the software, get inspiration, or get help from other users.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Hp Storevirtual Storage Vsa Keygen [TOP].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Hp Storevirtual Storage Vsa Keygen [TOP].md
deleted file mode 100644
index 33bb65d09c02433e6cf7953d9d835395fca5f299..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Hp Storevirtual Storage Vsa Keygen [TOP].md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-August 10, 2017 — As the first 3-year licenses were due for renewal, HPE Storage introduced 10TB and 50TB perpetual license options, eliminating the term. The HPE Storage 10TB perpetual license offers up to 1.05TB of storage for £189, which is 0.08GB of free storage space.
-The new 10TB perpetual license for HPE Storage offers up to 1.08TB of storage, which is approximately 0.088GB of free storage space.
-HPE Storage 10TB and 50TB perpetual licenses released earlier this year cost £149.99 and £269.99 for 10TB and 50TB respectively. 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Aimersoft.Video.Editor.3.6.2.0...Crack PORTABLE.md b/spaces/inreVtussa/clothingai/Examples/Aimersoft.Video.Editor.3.6.2.0...Crack PORTABLE.md
deleted file mode 100644
index 6c870f73c85d17ec3240a7f27861369d128e7c1d..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Aimersoft.Video.Editor.3.6.2.0...Crack PORTABLE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-you are downloading Aimersoft Video Editor 3.6.2 Crack. get the latest setup through our link available of the softwaresenter.blogspot.com. 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Arcsoft Portrait Plus 3.0.0.66 Crack __EXCLUSIVE__edk.md b/spaces/inreVtussa/clothingai/Examples/Arcsoft Portrait Plus 3.0.0.66 Crack __EXCLUSIVE__edk.md
deleted file mode 100644
index 18cb29e10d05ca0994d65f2fe0b3b1e1da8c7198..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Arcsoft Portrait Plus 3.0.0.66 Crack __EXCLUSIVE__edk.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
BROKE PROTOCOL: Online City RPG is a sandbox game that lets you create your own character and roleplay in a huge open-world city. You can choose to be a cop, a criminal, a businessman, a hacker, or anything else you can imagine. You can also interact with other players online, join gangs, form alliances, start wars, or just have fun.
However, if you want to have an edge over other players, you may want to use the BROKE PROTOCOL: Online City RPG v0.78 VIP hack. This is a modded version of the game that gives you access to various cheats and hacks that can enhance your gameplay experience. Some of the features of the BROKE PROTOCOL: Online City RPG v0.78 VIP hack are:
-
-
-
Speed Hack: You can run faster than normal, which can help you escape from enemies or chase them down.
-
No Recoil: You can shoot without any recoil, which can improve your accuracy and damage.
-
WallHack: You can see through walls, which can help you spot enemies or items.
-
Aim Bot: You can automatically aim at enemies, which can make you a deadly shooter.
-
God Mode: You can become invincible, which can make you immune to any damage or harm.
-
Unlimited Ammo: You can have unlimited ammo for any weapon, which can save you from reloading or running out of bullets.
-
Money Hack: You can have unlimited money, which can help you buy anything you want or need.
-
Walking Through the Walls: You can walk through any wall, which can help you access any area or escape from any situation.
-
Invisibility: You can become invisible, which can help you avoid detection or sneak up on enemies.
-
-
-
The BROKE PROTOCOL: Online City RPG v0.78 VIP hack is available online for free download from various sources, such as platinmods.com, indiesewhub.com, latinbusinessyellowpages.com, and aboulderpharm.com. However, you should be aware that some of these sources may not be authorized or reliable, and may contain errors or viruses. Therefore, you should always use caution when downloading files from unknown or untrusted sources. Alternatively, you can purchase the official game from the Google Play Store or other reputable online platforms.
-
-
If you want to enjoy BROKE PROTOCOL: Online City RPG with more fun and excitement, you should not miss the opportunity to get the BROKE PROTOCOL: Online City RPG v0.78 VIP hack. It will help you to improve your skills, power, and reputation in the game.
-
How to install and use BROKE PROTOCOL: Online City RPG v0.78 VIP hack?
-
-
If you have downloaded the BROKE PROTOCOL: Online City RPG v0.78 VIP hack from one of the online sources, you need to follow some steps to install and use it. Here are the steps:
-
-
-
-
Make sure you have the original game BROKE PROTOCOL: Online City RPG installed on your device. You can get it from the Google Play Store or other reputable online platforms.
-
Uninstall any previous versions of the game or the hack that you may have on your device.
-
Download the BROKE PROTOCOL: Online City RPG v0.78 VIP hack file from one of the online sources, such as platinmods.com, indiesewhub.com, latinbusinessyellowpages.com, or aboulderpharm.com.
-
Extract the file using a file manager or a zip extractor app.
-
Copy the extracted folder to your device's internal storage or SD card.
-
Install the BROKE PROTOCOL: Online City RPG v0.78 VIP hack APK file by tapping on it and allowing unknown sources if prompted.
-
Launch the game and enjoy the VIP features and hacks.
-
-
-
Note: Some online sources may require you to complete a survey or an offer before downloading the file. You should be careful about these sources, as they may be scams or contain malware. You should also not share your personal or financial information with these sources.
-
-
What are the risks and drawbacks of using BROKE PROTOCOL: Online City RPG v0.78 VIP hack?
-
-
While using the BROKE PROTOCOL: Online City RPG v0.78 VIP hack may seem tempting and fun, you should also be aware of the risks and drawbacks of using it. Some of them are:
-
-
-
You may violate the terms and conditions of the game and get banned or suspended from playing online.
-
You may ruin the game balance and fairness for other players and yourself.
-
You may lose interest in the game and its challenges.
-
You may damage your device or compromise its security by downloading files from untrusted sources.
-
You may face legal consequences for infringing the intellectual property rights of the game developer or publisher.
-
-
-
Therefore, you should use the BROKE PROTOCOL: Online City RPG v0.78 VIP hack at your own risk and discretion. You should also respect the game developer and publisher, and support them by purchasing the official game or in-app purchases.
-
What are the features and gameplay of BROKE PROTOCOL: Online City RPG?
-
-
BROKE PROTOCOL: Online City RPG is a sandbox game that lets you create your own character and roleplay in a huge open-world city. The game features and gameplay include:
-
-
-
You can customize your character's appearance, clothes, accessories, weapons, vehicles, and more.
-
You can explore the city and its various locations, such as shops, banks, hospitals, police stations, airports, casinos, and more.
-
You can interact with other players online, chat with them, trade with them, fight with them, or cooperate with them.
-
You can join or create gangs, factions, or groups, and participate in turf wars, heists, raids, or missions.
-
You can choose your own role and identity in the city, such as a cop, a criminal, a businessman, a hacker, or anything else you can imagine.
-
You can earn money by doing various jobs or activities, such as robbing banks, selling drugs, hacking computers, driving taxis, or delivering pizzas.
-
You can spend money by buying items or services, such as weapons, vehicles, clothes, houses, hotels, casinos, or hiring mercenaries.
-
You can also use money to bribe cops or other players to get out of trouble or get favors.
-
You can experience realistic physics and damage systems in the game, such as bullet drop, car crashes, explosions, ragdoll effects, and more.
-
You can also enjoy various mini-games and events in the game, such as racing, gambling, shooting range, paintball, zombie mode, and more.
-
-
-
BROKE PROTOCOL: Online City RPG is a fun and immersive game that offers endless possibilities and opportunities for roleplaying and custom content. You can play the game however you want and create your own stories and adventures in the city.
-
What are the reviews and ratings of BROKE PROTOCOL: Online City RPG?
-
-
BROKE PROTOCOL: Online City RPG is a popular and well-received game that has received many positive reviews and ratings from players and critics. Some of the reviews and ratings of the game are:
-
-
-
The game has a 4.1 out of 5 stars rating on the Google Play Store, based on over 40,000 reviews. Most of the reviewers praised the game's graphics, gameplay, customization, and online features.
-
The game has a 9 out of 10 rating on Steam, based on over 3,000 reviews. Most of the reviewers complimented the game's sandbox, roleplaying, and modding aspects.
-
The game has a 4.5 out of 5 stars rating on Metacritic, based on 12 critic reviews. Most of the critics appreciated the game's originality, creativity, and diversity.
-
The game has a 4.7 out of 5 stars rating on GameSpot, based on 7 user reviews. Most of the users enjoyed the game's freedom, realism, and fun factor.
-
-
-
BROKE PROTOCOL: Online City RPG is a highly rated and recommended game that offers a unique and immersive experience for sandbox and roleplaying fans.
-
-
Conclusion
-
-
BROKE PROTOCOL: Online City RPG is a sandbox game that lets you create your own character and roleplay in a huge open-world city. You can choose your own role and identity in the city, such as a cop, a criminal, a businessman, a hacker, or anything else you can imagine. You can also interact with other players online, join gangs, form alliances, start wars, or just have fun.
-
-
If you want to have an edge over other players, you may want to use the BROKE PROTOCOL: Online City RPG v0.78 VIP hack. This is a modded version of the game that gives you access to various cheats and hacks that can enhance your gameplay experience. However, you should also be aware of the risks and drawbacks of using it, such as violating the game's terms and conditions, ruining the game balance and fairness, losing interest in the game and its challenges, damaging your device or compromising its security, or facing legal consequences.
-
-
Therefore, you should use the BROKE PROTOCOL: Online City RPG v0.78 VIP hack at your own risk and discretion. You should also respect the game developer and publisher, and support them by purchasing the official game or in-app purchases.
-
-
-- You can add some screenshots or videos of the game or the hack to make the article more visual and appealing.
-- You can add some tips or tricks on how to play the game or use the hack effectively and safely.
-- You can add some links or references to other sources or websites that provide more information or downloads for the game or the hack.
-- You can add a call to action at the end of the article to encourage the readers to try the game or the hack, or to share their feedback or opinions. 3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Capepacksoftwarefree _TOP_14.md b/spaces/inreVtussa/clothingai/Examples/Capepacksoftwarefree _TOP_14.md
deleted file mode 100644
index 81214358fbb740035a380d3c03bbc6d04778719f..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Capepacksoftwarefree _TOP_14.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
How to Optimize Your Palletization with Cape Pack Software
-
Palletization is the process of arranging products on pallets for efficient shipping and storage. Palletization can help you reduce transportation costs, improve space utilization, and create a sustainable packaging supply chain. But how do you find the best product size, case count, case size, and pallet load for your products?
That's where Cape Pack software comes in. Cape Pack is a modular suite of palletization software that helps you optimize your primary product size for shipping, create new case sizes, build efficient pallet patterns, analyze compression strength of cases and pallets, and improve material and cube utilization. Cape Pack is available as a subscription, so you can access the latest version, adapt your subscription to your workload, and avoid large investments and maintenance fees.
-
One of the best features of Cape Pack is its cloud-based platform. You can upload and save your information to the cloud for easy viewing and sharing. You can also generate high-quality reports that include load formatting, edited layer patterns, and truck analysis data. You can share these reports with anyone, anywhere, and accelerate your design and approval processes. Cape Pack also integrates with ArtiosCAD, a leading structural design software, to help you align your packaging design and shipping.
-
If you want to try Cape Pack for yourself, you can download a free 30-day trial from Esko's website[^2^]. You will also get access to your own cloud database for your Cape Pack reporting and collaboration. You can also check out some use cases from other companies that have saved impressive amounts of money while reducing their carbon footprint with Cape Pack[^1^].
-
-
Cape Pack is the ultimate solution for palletization optimization. Whether you are a manufacturer, distributor, or retailer, you can benefit from using Cape Pack to design the optimum product size and perform compression strength analysis on your case and pallet load. Cape Pack will help you evaluate alternative case sizes and maximize pallet loading and improve space utilization. Cape Pack will also help you create a sustainable packaging supply chain by cutting transportation costs and reducing the number of trucks on the streets.
-
Don't miss this opportunity to optimize your palletization with Cape Pack software. Download your free trial today and see for yourself how Cape Pack can transform your packaging operations.
-
-
How does Cape Pack work? It's simple. You can start with an existing case size, an existing product size, or use Cape Pack to determine the most appropriate size for a new product. You just need to enter the product dimensions, specify the case or tray type, pick a pallet, and enter the maximum load weight and height. Cape Pack will calculate and display the case counts and pallet loads for you. You can choose a pallet load and create a report that you can print, email, upload to the cloud, or use to interface with another program application.
-
Cape Pack also allows you to edit the layer patterns of your pallet loads. You can drag and drop cases to create custom patterns, or use the automatic pattern generator to find the best pattern for your load. You can also view different orientations of your cases and pallets, and compare different palletization scenarios. Cape Pack will show you the material and cube utilization of each scenario, as well as the compression strength of your cases and pallets.
-
Cape Pack also offers a truck analysis feature that helps you optimize the loading of trucks with different pallet loads. You can enter the dimensions and weight of your truck, and Cape Pack will show you how many pallets you can fit in it. You can also view different loading configurations and compare their efficiency and stability. Cape Pack will help you reduce the number of trucks needed for your shipments, and save on fuel and emissions.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/ivntl/MMS/vits/attentions.py b/spaces/ivntl/MMS/vits/attentions.py
deleted file mode 100644
index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000
--- a/spaces/ivntl/MMS/vits/attentions.py
+++ /dev/null
@@ -1,303 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so that it adds up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the column dimension
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # prepend zeros so the elements are skewed (shifted) after the reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
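The `_relative_position_to_absolute_position` and `_absolute_position_to_relative_position` helpers above only reshuffle indices, so their effect is easiest to see on shapes. A small check under the same import assumptions as the sketch above:

```python
import torch
from attentions import MultiHeadAttention

mha = MultiHeadAttention(channels=8, out_channels=8, n_heads=2, window_size=4)

rel = torch.randn(1, 2, 5, 9)  # [batch, heads, length, 2*length - 1]
absolute = mha._relative_position_to_absolute_position(rel)
print(absolute.shape)          # torch.Size([1, 2, 5, 5])

back = mha._absolute_position_to_relative_position(absolute)
print(back.shape)              # torch.Size([1, 2, 5, 9])
```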
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/ivntl/MMS/vits/mel_processing.py b/spaces/ivntl/MMS/vits/mel_processing.py
deleted file mode 100644
index 817f03756f64caf8cc54329a9325024c8fb9e0c3..0000000000000000000000000000000000000000
--- a/spaces/ivntl/MMS/vits/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
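A hedged usage sketch of the three entry points above. The parameter values (1024-point FFT, hop 256, 80 mel bands at 22.05 kHz) are common VITS-style settings and are assumptions, not taken from this file; the sketch also assumes the older torch/librosa APIs this module targets (pre-return_complex torch.stft, positional librosa mel arguments).

import torch

wav = torch.rand(1, 22050) * 2 - 1                  # hypothetical 1 s of audio in [-1, 1]

spec = spectrogram_torch(wav, n_fft=1024, sampling_rate=22050,
                         hop_size=256, win_size=1024, center=False)
mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=80,
                        sampling_rate=22050, fmin=0, fmax=None)

# mel_spectrogram_torch fuses both steps and reuses the hann_window /
# mel_basis caches, which are keyed by dtype and device.
mel_direct = mel_spectrogram_torch(wav, 1024, 80, 22050, 256, 1024,
                                   fmin=0, fmax=None, center=False)
print(spec.shape, mel.shape, mel_direct.shape)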
diff --git a/spaces/jackli888/stable-diffusion-webui/modules/images.py b/spaces/jackli888/stable-diffusion-webui/modules/images.py
deleted file mode 100644
index a58573264ee61a83873b8901336be030cf826e3f..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/modules/images.py
+++ /dev/null
@@ -1,669 +0,0 @@
-import datetime
-import sys
-import traceback
-
-import pytz
-import io
-import math
-import os
-from collections import namedtuple
-import re
-
-import numpy as np
-import piexif
-import piexif.helper
-from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
-from fonts.ttf import Roboto
-import string
-import json
-import hashlib
-
-from modules import sd_samplers, shared, script_callbacks, errors
-from modules.shared import opts, cmd_opts
-
-LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
-
-def image_grid(imgs, batch_size=1, rows=None):
- if rows is None:
- if opts.n_rows > 0:
- rows = opts.n_rows
- elif opts.n_rows == 0:
- rows = batch_size
- elif opts.grid_prevent_empty_spots:
- rows = math.floor(math.sqrt(len(imgs)))
- while len(imgs) % rows != 0:
- rows -= 1
- else:
- rows = math.sqrt(len(imgs))
- rows = round(rows)
- if rows > len(imgs):
- rows = len(imgs)
-
- cols = math.ceil(len(imgs) / rows)
-
- params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
- script_callbacks.image_grid_callback(params)
-
- w, h = imgs[0].size
- grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
-
- for i, img in enumerate(params.imgs):
- grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
-
- return grid
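A minimal sketch of calling image_grid; the solid-colour tiles are invented, and the call assumes the surrounding webui modules (shared, script_callbacks) are importable so the grid callback can run.

from PIL import Image

imgs = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue", "white")]
grid = image_grid(imgs, batch_size=4, rows=2)       # 2 rows x 2 columns
grid.save("grid_preview.png")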
-
-
-Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
-
-
-def split_grid(image, tile_w=512, tile_h=512, overlap=64):
- w = image.width
- h = image.height
-
- non_overlap_width = tile_w - overlap
- non_overlap_height = tile_h - overlap
-
- cols = math.ceil((w - overlap) / non_overlap_width)
- rows = math.ceil((h - overlap) / non_overlap_height)
-
- dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
- dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
-
- grid = Grid([], tile_w, tile_h, w, h, overlap)
- for row in range(rows):
- row_images = []
-
- y = int(row * dy)
-
- if y + tile_h >= h:
- y = h - tile_h
-
- for col in range(cols):
- x = int(col * dx)
-
- if x + tile_w >= w:
- x = w - tile_w
-
- tile = image.crop((x, y, x + tile_w, y + tile_h))
-
- row_images.append([x, tile_w, tile])
-
- grid.tiles.append([y, tile_h, row_images])
-
- return grid
-
-
-def combine_grid(grid):
- def make_mask_image(r):
- r = r * 255 / grid.overlap
- r = r.astype(np.uint8)
- return Image.fromarray(r, 'L')
-
- mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
- mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
-
- combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
- for y, h, row in grid.tiles:
- combined_row = Image.new("RGB", (grid.image_w, h))
- for x, w, tile in row:
- if x == 0:
- combined_row.paste(tile, (0, 0))
- continue
-
- combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
- combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
-
- if y == 0:
- combined_image.paste(combined_row, (0, 0))
- continue
-
- combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
- combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
-
- return combined_image
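split_grid and combine_grid form the tiling pair used for working on large images: the image is cut into overlapping tiles, each tile is processed on its own, and combine_grid feather-blends the results back together across the overlap using the two gradient masks. A rough round-trip sketch with a no-op in place of the per-tile work (the image size is invented; tile size and overlap are the function defaults):

from PIL import Image

img = Image.new("RGB", (1024, 768), "gray")

grid = split_grid(img, tile_w=512, tile_h=512, overlap=64)
for y, h, row in grid.tiles:
    for i, (x, w, tile) in enumerate(row):
        row[i] = [x, w, tile]                       # replace `tile` with the processed tile
restored = combine_grid(grid)

assert restored.size == img.size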
-
-
-class GridAnnotation:
- def __init__(self, text='', is_active=True):
- self.text = text
- self.is_active = is_active
- self.size = None
-
-
-def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
- def wrap(drawing, text, font, line_length):
- lines = ['']
- for word in text.split():
- line = f'{lines[-1]} {word}'.strip()
- if drawing.textlength(line, font=font) <= line_length:
- lines[-1] = line
- else:
- lines.append(word)
- return lines
-
- def get_font(fontsize):
- try:
- return ImageFont.truetype(opts.font or Roboto, fontsize)
- except Exception:
- return ImageFont.truetype(Roboto, fontsize)
-
- def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
- for i, line in enumerate(lines):
- fnt = initial_fnt
- fontsize = initial_fontsize
- while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
- fontsize -= 1
- fnt = get_font(fontsize)
- drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
-
- if not line.is_active:
- drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
-
- draw_y += line.size[1] + line_spacing
-
- fontsize = (width + height) // 25
- line_spacing = fontsize // 2
-
- fnt = get_font(fontsize)
-
- color_active = (0, 0, 0)
- color_inactive = (153, 153, 153)
-
- pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
-
- cols = im.width // width
- rows = im.height // height
-
- assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
- assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
-
- calc_img = Image.new("RGB", (1, 1), "white")
- calc_d = ImageDraw.Draw(calc_img)
-
- for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
- items = [] + texts
- texts.clear()
-
- for line in items:
- wrapped = wrap(calc_d, line.text, fnt, allowed_width)
- texts += [GridAnnotation(x, line.is_active) for x in wrapped]
-
- for line in texts:
- bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
- line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
- line.allowed_width = allowed_width
-
- hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
- ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
-
- pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
-
- result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white")
-
- for row in range(rows):
- for col in range(cols):
- cell = im.crop((width * col, height * row, width * (col+1), height * (row+1)))
- result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row))
-
- d = ImageDraw.Draw(result)
-
- for col in range(cols):
- x = pad_left + (width + margin) * col + width / 2
- y = pad_top / 2 - hor_text_heights[col] / 2
-
- draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
-
- for row in range(rows):
- x = pad_left / 2
- y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
-
- draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
-
- return result
-
-
-def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
- prompts = all_prompts[1:]
- boundary = math.ceil(len(prompts) / 2)
-
- prompts_horiz = prompts[:boundary]
- prompts_vert = prompts[boundary:]
-
- hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
- ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
-
- return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
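draw_prompt_matrix builds the classic prompt-matrix grid: the first prompt is the shared base, the remaining prompts are split into a horizontal and a vertical half, and each row/column index is treated as a bitmask that says which prompts in that half are active. A small worked example of the bit logic (the prompt texts are invented):

prompts_horiz = ["blue hair", "smiling"]            # 2 prompts -> 1 << 2 = 4 columns
for pos in range(1 << len(prompts_horiz)):
    active = [p for i, p in enumerate(prompts_horiz) if pos & (1 << i)]
    print(pos, active)
# 0 []   1 ['blue hair']   2 ['smiling']   3 ['blue hair', 'smiling']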
-
-
-def resize_image(resize_mode, im, width, height, upscaler_name=None):
- """
- Resizes an image with the specified resize_mode, width, and height.
-
- Args:
- resize_mode: The mode to use when resizing the image.
- 0: Resize the image to the specified width and height.
- 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
- 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image's edges.
- im: The image to resize.
- width: The width to resize the image to.
- height: The height to resize the image to.
- upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
- """
-
- upscaler_name = upscaler_name or opts.upscaler_for_img2img
-
- def resize(im, w, h):
- if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
- return im.resize((w, h), resample=LANCZOS)
-
- scale = max(w / im.width, h / im.height)
-
- if scale > 1.0:
- upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
- assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}"
-
- upscaler = upscalers[0]
- im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
-
- if im.width != w or im.height != h:
- im = im.resize((w, h), resample=LANCZOS)
-
- return im
-
- if resize_mode == 0:
- res = resize(im, width, height)
-
- elif resize_mode == 1:
- ratio = width / height
- src_ratio = im.width / im.height
-
- src_w = width if ratio > src_ratio else im.width * height // im.height
- src_h = height if ratio <= src_ratio else im.height * width // im.width
-
- resized = resize(im, src_w, src_h)
- res = Image.new("RGB", (width, height))
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
-
- else:
- ratio = width / height
- src_ratio = im.width / im.height
-
- src_w = width if ratio < src_ratio else im.width * height // im.height
- src_h = height if ratio >= src_ratio else im.height * width // im.width
-
- resized = resize(im, src_w, src_h)
- res = Image.new("RGB", (width, height))
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
-
- if ratio < src_ratio:
- fill_height = height // 2 - src_h // 2
- res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
- res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
- elif ratio > src_ratio:
- fill_width = width // 2 - src_w // 2
- res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
- res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
-
- return res
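A hedged sketch of the three resize modes documented in the docstring above; the input size is invented, and passing upscaler_name="None" skips the neural upscalers and falls back to plain LANCZOS resampling.

from PIL import Image

src = Image.new("RGB", (640, 480), "gray")          # hypothetical 4:3 input

stretched = resize_image(0, src, 512, 512, upscaler_name="None")  # ignore aspect ratio
cropped = resize_image(1, src, 512, 512, upscaler_name="None")    # fill, then centre-crop
padded = resize_image(2, src, 512, 512, upscaler_name="None")     # fit, then fill the borders

assert stretched.size == cropped.size == padded.size == (512, 512)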
-
-
-invalid_filename_chars = '<>:"/\\|?*\n'
-invalid_filename_prefix = ' '
-invalid_filename_postfix = ' .'
-re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
-re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
-re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
-max_filename_part_length = 128
-
-
-def sanitize_filename_part(text, replace_spaces=True):
- if text is None:
- return None
-
- if replace_spaces:
- text = text.replace(' ', '_')
-
- text = text.translate({ord(x): '_' for x in invalid_filename_chars})
- text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
- text = text.rstrip(invalid_filename_postfix)
- return text
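A small sketch of what sanitize_filename_part does to prompt-like text (inputs invented): spaces become underscores unless replace_spaces=False, characters that are invalid in filenames are replaced with underscores, and the result is trimmed to 128 characters.

print(sanitize_filename_part('a photo of a "cat"?'))                    # a_photo_of_a__cat__
print(sanitize_filename_part('DPM++ 2M Karras', replace_spaces=False))  # DPM++ 2M Karras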
-
-
-class FilenameGenerator:
- replacements = {
- 'seed': lambda self: self.seed if self.seed is not None else '',
- 'steps': lambda self: self.p and self.p.steps,
- 'cfg': lambda self: self.p and self.p.cfg_scale,
- 'width': lambda self: self.image.width,
- 'height': lambda self: self.image.height,
- 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
- 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
- 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
- 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False),
- 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
- 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]