diff --git a/spaces/1368565466ki/Satdia/text/cleaners.py b/spaces/1368565466ki/Satdia/text/cleaners.py
deleted file mode 100644
index 68c9ad24d5a303b68a521fba2e8776c8cc867356..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/Satdia/text/cleaners.py
+++ /dev/null
@@ -1,475 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
-
-import re
-from unidecode import unidecode
-import pyopenjtalk
-from jamo import h2j, j2hcj
-from pypinyin import lazy_pinyin, BOPOMOFO
-import jieba, cn2an
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def lowercase(text):
- return text.lower()
-
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-
-def japanese_to_romaji_with_accent(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = ''
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- if text!='':
- text+=' '
- labels = pyopenjtalk.extract_fullcontext(sentence)
- for n, label in enumerate(labels):
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
- if phoneme not in ['sil','pau']:
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
- else:
- continue
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
- a2_next=-1
- else:
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
- # Accent phrase boundary
- if a3 == 1 and a2_next == 1:
- text += ' '
- # Falling
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
- text += '↓'
- # Rising
- elif a2 == 1 and a2_next == 2:
- text += '↑'
-        if i < len(marks):
-            text += unidecode(marks[i]).replace(' ', '')
-    return text
-
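A minimal sketch of how the selectable pipelines named in the module docstring (basic_cleaners, transliteration_cleaners) compose the helpers defined above; this is a reconstruction from the docstring, not the deleted file reproduced verbatim:

```python
# Sketch of the cleaner pipelines described in the module docstring above.
# Only helpers already defined in this hunk are used (lowercase,
# collapse_whitespace, convert_to_ascii); the language-specific cleaners
# (Japanese, Korean, Chinese) from the rest of the file are not reproduced.

def basic_cleaners(text):
    '''Basic pipeline: lowercase and collapse whitespace, no transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that can be transliterated to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text
```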
Avidemux 2.7.5 x64 Multilingual Crack: A Free Video Editor for Windows
-
If you are looking for a free and easy-to-use video editor for Windows, you might want to check out Avidemux 2.7.5 x64 Multilingual Crack. This is an open-source video editor that can handle various video formats, codecs, filters, and encoding tasks.
-
In this article, we will give you a brief overview of what Avidemux 2.7.5 x64 Multilingual Crack is, how to download and install it, how to use it for basic and advanced video editing tasks, why you should choose it over other video editors, and some tips and tricks for using it effectively.
Avidemux is an open-source video editor that was first released in 2000 by Mean (a French programmer). It is designed for simple cutting, filtering, and encoding tasks, but it also supports more complex features such as scripting, plugins, and command-line interface.
-
Avidemux can work with various video formats such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H.264, H.265, VOB, TS, ASF, OGM, and more. It can also encode audio files into formats such as MP3, WMA, AC3, MP2, WAV, and OGG. You can use Avidemux to perform basic editing tasks such as removing unwanted parts of the video, resizing, cropping, flipping, or rotating the picture. You can also apply filters and effects to your videos such as color correction, noise reduction, sharpening, deinterlacing, subtitles, etc.
-
How to download and install Avidemux 2.7.5 x64 Multilingual Crack
-
Avidemux 2.7.5 x64 Multilingual Crack is the latest version of Avidemux that was released on August 31st 2019. It is compatible with Windows XP/Vista/7/8/8.1/10 operating systems. To download and install it on your PC, you can follow these steps:
-
-
Go to this link and click on the file named Avidemux_2.7.5 VC++ 64bits.exe. This will start downloading the setup file to your computer.
-
Once the download is complete, double-click on the setup file and follow the instructions on the screen to install Avidemux 2.7.5 x64 Multilingual Crack on your PC.
-
After the installation is done, you can launch Avidemux from the Start menu or the desktop shortcut.
-
-
How to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks
-
Using Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks is quite simple and straightforward. Here are some basic steps that you can follow:
-
-
Open Avidemux and click on the File menu and select Open. Browse your computer and select the video file that you want to edit.
-
The video file will be loaded in the main window of Avidemux where you can see a preview of it on the left side and a timeline on the bottom side.
-
To cut a part of the video that you don't want to keep, move the slider on the timeline to the start point of the part that you want to remove and press [ on your keyboard to mark it as A. Then move the slider to the end point of the part that you want to remove and press ] on your keyboard to mark it as B. Then click on the Edit menu and select Delete. This will delete the part between A and B.
-
To resize or crop your video, click on the Video menu and select Filters. This will open a new window where you can see a list of filters that you can apply to your video.
-
To resize your video, select Transform from the left panel and then select Resize from the right panel. This will open another window where you can enter the new width and height of your video in pixels or as a percentage.
-
To crop your video, select Crop from the right panel under Transform. This will open another window where you can enter the number of pixels that you want to crop from each side of your video.
-
To apply any filter or effect to your video, such as color correction, noise reduction, or sharpening, select it from the left panel under Colors, Noise, Sharpness, etc., respectively.
-
To save your edited video file in a different format or codec than the original one, click on the drop-down menus under Output Format, Video Output, and Audio Output at the left side of the main window of Avidemux.
-
Select the format or codec that you want for your output file from the available options, such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H.264, H.265, VOB, TS, ASF, and OGM for the format; XviD, x264, x265, MPEG-4 ASP, MPEG-4 AVC, MPEG-4 HEVC, and MPEG-1/2 Video for the video codec; and MP3, WMA, AC3, MP2, WAV, and OGG for the audio codec. You can also adjust the quality or bitrate of the output file by moving the slider under each drop-down menu.
-
To save your edited video file on your computer, click on the File menu and select Save. Enter the name and location of your output file and click Save.
-
-
Why choose Avidemux 2.7.5 x64 Multilingual Crack over other video editors?
-
The advantages of Avidemux 2.7.5 x64 Multilingual Crack
-
Avidemux 2.7.5 x64 Multilingual Crack has some advantages over other video editors that make it a good choice for simple video editing tasks:
-
-
It is free and open-source software that does not require any license or subscription fee to use. You can download it from the official website or from other sources without any risk of malware or viruses.
-
It is lightweight and fast software that does not consume much of your system resources or disk space. You can run it on older or low-end computers without any lag or crash.
-
It is easy and intuitive software that has a simple graphical user interface with essential menus and controls. You can learn how to use it in a few minutes without any prior experience or training.
-
It is versatile and flexible software that supports a wide range of video formats, codecs, filters, and encoding options. You can edit any video file that you have on your computer or device without any compatibility issues.
-
It is powerful and advanced software that offers features such as scripting, plugins, and command-line interface for more complex video editing tasks. You can customize and automate your workflow with these features if you are a pro-level user.
-
-
The disadvantages of Avidemux 2.7.5 x64 Multilingual Crack
-
However, Avidemux 2.7.5 x64 Multilingual Crack also has some disadvantages that you should be aware of before choosing it as your video editor:
-
-
It is not a full-featured video editor that can handle all kinds of video editing tasks. It lacks some features such as timeline editing, transitions, titles, audio editing, etc., that are available in other video editors.
-
It is not a user-friendly video editor that has a modern and attractive user interface. It has a dated and plain user interface that may not appeal to some users who prefer a more stylish and elegant design.
-
It is not a stable and reliable video editor that works flawlessly on all systems and platforms. It may have some bugs and glitches that can cause errors or crashes during the editing process.
-
It is not a well-supported video editor that has a large and active community of users and developers. It may not have regular updates or improvements that can fix the issues or add new features to the software.
-
-
The comparison of Avidemux 2.7.5 x64 Multilingual Crack with other popular video editors
-
To help you decide whether Avidemux 2.7.5 x64 Multilingual Crack is the best video editor for you, here is a comparison table that shows how it stacks up against other popular video editors in terms of features, performance, price, and user ratings:
-
Tips and tricks for using Avidemux 2.7.5 x64 Multilingual Crack effectively
-
How to apply filters and effects to your videos
-
One of the main features of Avidemux 2.7.5 x64 Multilingual Crack is the ability to apply filters and effects to your videos to enhance their quality and appearance. Here are some tips and tricks for using filters and effects effectively:
-
-
To apply a filter or effect to your video, click on the Video menu and select Filters. This will open a new window where you can see a list of filters that you can apply to your video.
-
To add a filter or effect to your video, select it from the left panel and click on the Add button at the bottom right corner of the window. This will add the filter or effect to the right panel where you can see its name and settings.
-
To adjust the settings of a filter or effect, select it from the right panel and click on the Configure button at the bottom right corner of the window. This will open another window where you can change the parameters of the filter or effect according to your preference.
-
To preview the effect of a filter on your video, click on the Preview button at the bottom left corner of the window. This will open another window where you can see how your video looks with the filter or effect applied.
-
To remove a filter or effect from your video, select it from the right panel and click on the Remove button at the bottom right corner of the window. This will remove the filter or effect from the list.
-
Avidemux can also be automated with script files that call built-in functions, where function is the name of the function you want to call (a short example script follows this list). For example, to load a video file, you can use app.load("filename"); where filename is the name of your video file.
-
To run a script file, you can either use the File menu and select Run Project, or use the command-line option --run followed by the name of your script file. For example, to run a script file named script.js, you can use avidemux --run script.js.
-
To debug a script file, you can use the displayError and displayInfo functions to show pop-up messages with error or information messages. For example, to show an error message that says "Something went wrong", you can use displayError("Something went wrong");.
-
To find more examples of script files, you can check the official website of Avidemux or the online documentation. You can also look at the project files that are saved by Avidemux when you save your video editing settings.
-
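To make the scripting functions above concrete, here is a minimal sketch of a job script. It uses only the calls named in this article (app.load, displayInfo) and the --run option; the file name is a placeholder, and exact function signatures may vary between Avidemux versions:

```javascript
// script.js - minimal sketch using only the functions named above.
// "input.avi" is a placeholder; point it at a real file before running.
app.load("input.avi");             // load the source video into Avidemux
displayInfo("input.avi loaded");   // confirm the load with a pop-up message
// Run it from the command line with:  avidemux --run script.js
```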
-
Conclusion
-
A summary of the main points of the article
-
In this article, we have learned how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks. We have covered the following topics:
-
-
What is Avidemux 2.7.5 x64 Multilingual Crack and what are its features?
-
How to download and install Avidemux 2.7.5 x64 Multilingual Crack on your PC?
-
How to use Avidemux 2.7.5 x64 Multilingual Crack for basic and advanced video editing tasks such as cutting, filtering, encoding, and scripting?
-
Why choose Avidemux 2.7.5 x64 Multilingual Crack over other video editors and what are its advantages and disadvantages?
-
Tips and tricks for using Avidemux 2.7.5 x64 Multilingual Crack effectively and efficiently?
-
-
We hope that this article has helped you understand how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks and that you have enjoyed reading it.
-
FAQs
-
Here are some frequently asked questions about Avidemux 2.7.5 x64 Multilingual Crack and their answers:
-
-
Is Avidemux 2.7.5 x64 Multilingual Crack safe to use?
-
A: Yes, Avidemux 2.7.5 x64 Multilingual Crack is safe to use as long as you download it from the official website or from other trusted sources. It does not contain any malware or viruses that can harm your PC or your files.
-
Is Avidemux 2.7.5 x64 Multilingual Crack compatible with Mac or Linux?
-
A: No, Avidemux 2.7.5 x64 Multilingual Crack is only compatible with Windows operating systems such as Windows XP/Vista/7/8/8.1/10. However, there are other versions of Avidemux that are compatible with Mac or Linux such as Avidemux 2.6.x or Avidemux 2.5.x.
-
Can I use Avidemux 2.7.5 x64 Multilingual Crack for professional video editing?
-
A: No, Avidemux 2.7.5 x64 Multilingual Crack is not a professional video editor that can handle all kinds of video editing tasks such as timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, etc. It is designed for simple cutting, filtering, and encoding tasks only.
-
What are some alternatives to Avidemux 2.7.5 x64 Multilingual Crack?
-
A: Some alternatives to Avidemux 2.7.5 x64 Multilingual Crack are Adobe Premiere Pro, Apple Final Cut Pro, Cyberlink PowerDirector 365, Wondershare Filmora X, and DaVinci Resolve. These are more professional and full-featured video editors that can handle more complex and creative video editing tasks. However, they are also more expensive and require more system resources and learning time than Avidemux 2.7.5 x64 Multilingual Crack.
-
How can I learn more about Avidemux 2.7.5 x64 Multilingual Crack?
-
A: You can learn more about Avidemux 2.7.5 x64 Multilingual Crack by visiting the official website of Avidemux or the online documentation. You can also watch some video tutorials on YouTube or read some user reviews on various websites.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md
deleted file mode 100644
index 21d472bf060a857c7a149a81f2338936b9844460..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-```html
-
Is There Any Free DJ Software? The Best Options for Beginners and Pros
-
If you are looking for a way to mix music and create your own beats, you might be wondering: is there any free DJ software? The answer is yes, there are plenty of options available for both beginners and pros. In this article, we will review some of the best free DJ software that you can download and use right away.
-
What is DJ Software?
-
DJ software is a program that allows you to manipulate audio files and create mixes. You can use it to play music from your computer or external devices, adjust the tempo and pitch, apply effects and filters, loop and cue tracks, scratch and crossfade, and more. DJ software can also help you record and broadcast your mixes online.
Free DJ software can be a great way to start learning the basics of DJing without spending a lot of money. You can experiment with different features and techniques, practice your skills, and have fun. Free DJ software can also be useful for professional DJs who want to try out new software or have a backup option in case of emergencies.
-
What are the Best Free DJ Software?
-
There are many free DJ software available online, but not all of them are equally good. Some may have limited functionality, poor performance, or compatibility issues. To help you choose the best free DJ software for your needs, we have selected some of the most popular and reliable ones. Here they are:
-
-
Mixxx: Mixxx is one of the most popular free DJ software in the world. It has a user-friendly interface, powerful features, and supports a wide range of audio formats and hardware controllers. You can use it to mix up to four decks, sync tracks automatically, apply effects and EQs, record and stream your mixes, and more. Mixxx is also open-source, which means you can customize it to your liking or contribute to its development.
-
VirtualDJ: VirtualDJ is another well-known free DJ software that has been around for a long time. It has a sleek interface, advanced features, and supports various audio and video formats and hardware devices. You can use it to mix up to six decks, scratch and remix tracks, apply effects and transitions, create samples and loops, broadcast your mixes online, and more. VirtualDJ also has a large community of users who share tips, tutorials, plugins, skins, and more.
-
DJ ProDecks: DJ ProDecks is a simple but effective free DJ software that is designed for beginners. It has a minimalist interface, basic features, and supports MP3, WAV, OGG, WMA, AAC, FLA formats. You can use it to mix up to two decks, adjust the speed and pitch, apply effects and filters, loop and cue tracks, record your mixes, and more. DJ ProDecks also has a built-in browser that lets you access your music library easily.
-
-
How to Use Free DJ Software?
-
To use free DJ software, you will need a computer with enough memory and processing power, a sound card or audio interface, speakers or headphones, and optionally a MIDI controller or turntable. You will also need to download and install the software of your choice from its official website or a trusted source. Once you have everything set up, you can follow these basic steps:
-
-
Launch the software and explore its interface. Familiarize yourself with the different buttons, knobs, sliders, menus, etc.
-
Load your music files into the software. You can either drag and drop them from your computer or browse them from the software's library.
-
Select the tracks you want to mix and assign them to different decks. You can also adjust their volume levels, EQs, etc.
-
Start mixing by playing the tracks simultaneously or alternately. You can use the sync button to match their tempos automatically or manually adjust them using the pitch slider.
-
Add some flair to your mix by applying effects such as reverb, delay, flanger, etc. You can also use the crossfader to blend the tracks smoothly
- ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md
deleted file mode 100644
index 08fb26f47bebfad7c49cb5f778ffffa73a5b25dc..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Business Goals 3 Students Book PDF Book. So keep your ... Make it a career goal in to learn a new skill you can apply to your job. ... Touchstone: Workbook 1. 1fdad05405
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md b/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md
deleted file mode 100644
index 3bf35477a3c9c374cfdf8eead23ef1145b2b3140..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
How Control Machete's "Comprendes Mendes" Became a Christmas Classic
-
Control Machete was a Mexican hip hop group that emerged in the late 90s with a distinctive sound that blended rap, rock, and regional influences. Their debut album, Mucho Barato, was released in 1997 and featured their most famous song, "Comprendes Mendes".
-
The song became a hit not only in Mexico but also in other Latin American countries and even in the US, where it was featured in the soundtrack of the movie Amores Perros. The song also gained popularity among acapella groups, who found its catchy melody and rhythmic structure ideal for vocal arrangements. One of the most notable examples is the version by Vocal Sampling, a Cuban acapella group that recreates the sounds of instruments with their voices.
-
But how did "Comprendes Mendes" become a Christmas song? Well, it turns out that the song has a hidden connection to the festive season. The lyrics mention several times the word "control", which is not only the name of the group but also a slang term for cocaine. In Mexico, cocaine is sometimes called "nieve" (snow), which is also a common symbol of Christmas. Moreover, the song samples a famous Christmas carol called "Noche de Paz" (Silent Night) at the beginning and at the end, creating a contrast between the peaceful melody and the aggressive rap.
-
-
Therefore, some fans of Control Machete have adopted "Comprendes Mendes" as a Christmas song, either as a joke or as a way of celebrating their identity and culture. The song has also been parodied and remixed by other artists, adding more elements related to Christmas, such as bells, sleighs, and Santa Claus. For example, there is a version by DJ Rasec that mixes "Comprendes Mendes" with "Jingle Bells" and another one by DJ Pelos that mixes it with "All I Want for Christmas Is You".
-
So, whether you are looking for a different way to spice up your holiday playlist or you are just curious about this unusual phenomenon, you might want to check out Control Machete's "Comprendes Mendes" and its acapella and Christmas versions. You might be surprised by how much you enjoy this rap classic.
Their first album, Mucho Barato, was a success both critically and commercially. It sold more than 500,000 copies and received several awards and nominations. It also opened the doors for other Mexican rap artists to gain recognition and exposure. Control Machete continued to release two more albums: Artillería Pesada in 1999 and Uno, Dos: Bandera in 2003. However, in 2004, the group announced their separation due to creative differences and personal issues.
-
Despite their breakup, Control Machete remains one of the most influential and respected rap groups in Mexico and Latin America. Their songs have been covered by other artists from different genres and have been used in movies, TV shows, video games, and commercials. Their legacy is also evident in the solo careers of their members, who have continued to produce music and collaborate with other artists.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md
deleted file mode 100644
index 6238a307067d812e4d4776f2b29ea83d7ee70366..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Mahesh Babu's Caravan Becomes Talk Of The Town · Chaavu Kaburu Challaga Full Movie Leaked Online · Mosagallu Full Movie Leaked Online For Free Download · Featured. Bollywood; Television; Tamil; Telugu; Kannada; Malayalam. 1fdad05405
-
-
-
diff --git a/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md b/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md
deleted file mode 100644
index 22dd01c2cc58898e796f6fae530a3a8484d3312f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-
Dream League Soccer 2019 Mod APK All Players 100: How to Download and Install
-
If you are a fan of soccer games, you might have heard of Dream League Soccer 2019, one of the most popular and realistic soccer games on Android. But did you know that you can play Dream League Soccer 2019 mod apk all players 100, which gives you access to all the players in the game with maximum ratings? In this article, we will show you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to enjoy this amazing game on your device.
-
What is Dream League Soccer 2019?
-
Dream League Soccer 2019 is a soccer game developed by First Touch Games, a studio that specializes in creating soccer games for mobile devices. Dream League Soccer 2019 allows you to create your own dream team, compete in various modes and leagues, and customize your stadium and kits. You can also play online with other players from around the world, or offline with friends using local Wi-Fi.
Some of the features of Dream League Soccer 2019 are:
-
-
Realistic gameplay and graphics, with smooth animations and physics.
-
Over 3500 licensed players from FIFPro™, with authentic names, faces, and skills.
-
6 divisions and 7 cup competitions to play in, plus the prestigious Dream League Online mode.
-
Build your own stadium and show it off to your opponents.
-
Customize your team's logo, kits, and manager.
-
Sync your progress between devices using Google Play Cloud.
-
Soundtrack featuring The Luka State, Sunset Sons, Vistas, and more.
-
-
Why play Dream League Soccer 2019 mod apk?
-
Dream League Soccer 2019 is already a great game, but if you want to make it even better, you can try playing Dream League Soccer 2019 mod apk. This is a modified version of the game that gives you some advantages over the original version. For example, with Dream League Soccer 2019 mod apk all players 100, you can have all the players in the game with maximum ratings. This means that you can create the ultimate dream team with any players you want, without worrying about their skills or attributes. You can also have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features. With Dream League Soccer 2019 mod apk all players 100, you can enjoy the game without any limitations or restrictions.
-
-
How to download Dream League Soccer 2019 mod apk all players 100?
-
Requirements for Dream League Soccer 2019 mod apk
-
Before you download and install Dream League Soccer 2019 mod apk all players 100, you need to make sure that your device meets the following requirements:
-
-
Your device must have Android version 4.4 or higher.
-
Your device must have at least 1 GB of RAM and free storage space.
-
You must enable unknown sources in your device's settings. This will allow you to install apps from sources other than the Google Play Store.
-
-
Steps to download and install Dream League Soccer 2019 mod apk
-
Once you have checked the requirements, you can follow these steps to download and install Dream League Soccer 2019 mod apk all players 100:
-
-
Download the Dream League Soccer 2019 mod apk file from a trusted source. You can use this link to download the file.
-
Download the Dream League Soccer 2019 OBB file from the same source. You can use this link to download the file.
-
Locate the downloaded files in your device's file manager and tap on them to install them. You may need to grant some permissions to the app.
-
After installing the apk file, do not open the app yet. Instead, move the OBB file to the Android/OBB folder in your device's internal storage. If you don't have this folder, create it manually.
-
Now you can open the app and enjoy Dream League Soccer 2019 mod apk all players 100.
-
-
How to play Dream League Soccer 2019 mod apk all players 100?
-
Playing Dream League Soccer 2019 mod apk all players 100 is similar to playing the original version, but with some differences. Here are some tips on how to play Dream League Soccer 2019 mod apk all players 100:
-
How to create your dream team
-
With Dream League Soccer 2019 mod apk all players 100, you can create your dream team with any players you want, regardless of their ratings or prices. You can also edit their attributes, positions, and skills as you wish. To create your dream team, follow these steps:
-
-
Go to the Team Management menu and tap on the Transfer icon.
-
Select any player you want from the list of available players. You can use the filters to narrow down your search by name, rating, position, or league.
-
Tap on the Buy button to add the player to your team. You don't need to pay any coins or gems for the player.
-
Repeat this process until you have filled your squad with your desired players.
-
You can also go to the Player Development menu and tap on any player to edit their attributes, positions, and skills. You can increase or decrease their ratings as you like.
-
-
How to compete in different modes and leagues
-
Dream League Soccer 2019 mod apk all players 100 offers you various modes and leagues to play in, such as Career Mode, Dream League Online, Friendly Matches, and Cup Competitions. You can choose any mode or league you want and compete against different teams with different difficulties. To compete in different modes and leagues, follow these steps:
-
-
Go to the Main Menu and tap on the Play icon.
-
Select the mode or league you want to play in. You can see the details of each mode or league, such as the number of matches, the rewards, and the difficulty level.
-
Select your team and your opponent's team. You can also customize your team's formation, tactics, and kits before starting the match.
-
Tap on the Start Match button to begin playing. You can use the virtual buttons on the screen to control your players, pass, shoot, tackle, and perform other actions.
-
Try to score more goals than your opponent and win the match. You can also pause the game and make substitutions or change tactics if needed.
-
After finishing the match, you can see the match statistics, such as the scoreline, the possession, the shots, and the fouls. You can also see your progress in the mode or league you are playing in.
-
-
How to customize your stadium and kits
-
Dream League Soccer 2019 mod apk all players 100 allows you to customize your stadium and kits according to your preferences. You can change the name, color, design, and capacity of your stadium, as well as the logo, color, and design of your kits. To customize your stadium and kits, follow these steps:
-
-
Go to the My Club menu and tap on the Stadium icon or the Kit icon.
-
Select the option you want to customize, such as Stadium Name, Stadium Color, Stadium Design, or Stadium Capacity for the stadium, or Logo, Home Kit, Away Kit, or Third Kit for the kits.
-
Use the sliders, buttons, or menus to change the features of your stadium or kits. You can see a preview of your changes on the screen.
-
Tap on the Save button to confirm your changes. You can also tap on the Reset button to undo your changes.
-
-
Pros and cons of Dream League Soccer 2019 mod apk all players 100
-
Dream League Soccer 2019 mod apk all players 100 has its pros and cons, like any other game. Here are some of the pros and cons of playing Dream League Soccer 2019 mod apk all players 100:
-
Pros of Dream League Soccer 2019 mod apk
-
-
You can have all the players in the game with maximum ratings, which makes your team unbeatable and fun to play with.
-
You can have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features.
-
You can customize your team's logo, kits, and manager as you like, without any restrictions or costs.
-
You can enjoy the game without any ads or in-app purchases.
-
-
Cons of Dream League Soccer 2019 mod apk
-
-
The game may not be compatible with some devices or may crash or lag sometimes.
-
The game may not be updated regularly or may not have the latest features or players from the original version.
-
The game may not be fair or challenging for some players who prefer to play with the original rules and ratings.
-
The game may not be safe or secure for your device or data, as it is not from an official source.
-
-
Conclusion
-
Dream League Soccer 2019 is a fantastic soccer game that lets you create your own dream team, compete in various modes and leagues, and customize your stadium and kits. But if you want to make it even more exciting and enjoyable, you can try playing Dream League Soccer 2019 mod apk all players 100, which gives you all the players in the game with maximum ratings, unlimited coins and gems, and more. In this article, we showed you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to play it on your device. We hope you found this article helpful and informative. Now go ahead and enjoy Dream League Soccer 2019 mod apk all players 100!
-
FAQs
-
Here are some frequently asked questions about Dream League Soccer 2019 mod apk all players 100:
-
Q: Is Dream League Soccer 2019 mod apk all players 100 legal?
-
A: No, Dream League Soccer 2019 mod apk all players 100 is not legal, as it is a modified version of the original game that violates its terms and conditions. We do not endorse or promote the use of Dream League Soccer 2019 mod apk all players 100, and we are not responsible for any consequences that may arise from using it.
-
Q: Is Dream League Soccer 2019 mod apk all players 100 safe?
-
A: No, Dream League Soccer 2019 mod apk all players 100 is not safe, as it is not from an official source and may contain viruses or malware that can harm your device or data. We recommend that you download and install Dream League Soccer 2019 from the Google Play Store or other trusted sources.
-
Q: How can I update Dream League Soccer 2019 mod apk all players 100?
-
A: You cannot update Dream League Soccer 2019 mod apk all players 100 from the app itself, as it is not connected to the original server. You may need to download and install a new version of Dream League Soccer 2019 mod apk all players 100 from a different source if there is one available. However, we advise you to uninstall Dream League Soccer 2019 mod apk all players 100 and install the original version of Dream League Soccer 2019 instead.
-
Q: How can I play Dream League Soccer 2019 mod apk all players 100 online?
-
A: You cannot play Dream League Soccer 2019 mod apk all players 100 online with other players from around the world, as it is not compatible with the original server. You can only play offline with friends using local Wi-Fi. If you want to play online with other players, you need to play the original version of Dream League Soccer 2019.
-
Q: How can I get more coins and gems in Dream League Soccer 2019?
-
A: You can get more coins and gems in Dream League Soccer 2019 by playing matches, completing achievements, watching ads, or buying them with real money. You can also use some tricks and hacks to get more coins and gems, but we do not recommend that, as it may ruin the fun of the game or get you banned.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/52Hz/SRMNet_real_world_denoising/app.py b/spaces/52Hz/SRMNet_real_world_denoising/app.py
deleted file mode 100644
index fdf1a4be723e832aed814338e0a343958a6b4bd4..0000000000000000000000000000000000000000
--- a/spaces/52Hz/SRMNet_real_world_denoising/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import os
-import gradio as gr
-from PIL import Image
-
-
-os.system(
- 'wget https://github.com/FanChiMao/SRMNet/releases/download/0.0/real_denoising_SRMNet.pth -P experiments/pretrained_models')
-
-
-def inference(img):
- os.system('mkdir test')
- #basewidth = 256
- #wpercent = (basewidth / float(img.size[0]))
- #hsize = int((float(img.size[1]) * float(wpercent)))
- #img = img.resize((basewidth, hsize), Image.ANTIALIAS)
- img.save("test/1.png", "PNG")
- os.system(
- 'python main_test_SRMNet.py --input_dir test --weights experiments/pretrained_models/real_denoising_SRMNet.pth')
- return 'result/1.png'
-
-
-title = "Selective Residual M-Net for Real-world Image Denoising"
-description = "Gradio demo for SRMNet. SRMNet has competitive performance results on two synthetic and two realworld noisy datasets in terms of quantitative metrics and visual quality. See the paper and project page for detailed results below. Here, we provide a demo for real-world image denoising. To use it, simply upload your image, or click one of the examples to load them. Reference from: https://huggingface.co/akhaliq"
-article = "
Minecraft is one of the most popular and creative games in the world. It lets you build your own virtual world out of blocks, explore different biomes, fight monsters, and interact with other players. Whether you want to build a castle, a spaceship, or a pixel-art masterpiece, Minecraft lets you unleash your imagination and have fun.
-
If you want to enjoy the latest features and updates of Minecraft, you need to download the most recent version of the game. In this article, we will show you how to download Minecraft 1.13.1 APK for free on your Android device. This version includes some exciting additions such as foxes, brown mooshrooms, structure blocks, and wither roses.
Minecraft 1.13.1 is the latest update of the Bedrock edition of the game, which is compatible with Android devices. It was released on October 2, 2021, and brings some performance and stability improvements, as well as some new features and items. Here are some of the highlights of this update:
-
Foxes
-
Foxes are cute, furry animals that can be found in taiga, giant tree taiga, and snowy biomes. They are nocturnal hunters that feed on chickens, rabbits, fish, and berries. They are also very fast and can jump over fences and walls.
-
If you want to tame a fox, you need to find two adult foxes holding sweet berries in their mouths. You can use a lead to bring them together and breed them with more sweet berries. The baby fox that is born will trust you and follow you. You can also give it a name tag or a collar to make it more loyal.
-
Brown mooshrooms
-
Brown mooshrooms are a new variant of mushroom cows that can be obtained through a rare event. They look like normal cows but with brown mushrooms on their backs. They can provide mushroom stew or suspicious stew when sheared or milked with a bowl.
-
-
Structure blocks
-
Structure blocks are special blocks that can be used to create and copy structures in the game. They are useful for map makers and builders who want to save time and resources. They can only be obtained in Creative mode with the command /give @s structure_block.
-
To use a structure block, you need to place it on the ground and open its interface by right-clicking on it. There are four structure block modes: Save, Load, Corner, and Data. You can use Save mode to save a structure to a file, Load mode to load a structure from a file, Corner mode to define the boundaries of a structure, and Data mode to add custom data to a structure.
-
Wither rose
-
The wither rose is a new type of flower that can inflict the wither effect on any living entity that touches it. It has a black color and a skull pattern on its petals. It can be used to craft black dye or suspicious stew.
-
To get a wither rose, you need to lure a wither boss into a flower biome and have it kill a mob. There is a 100% chance that a wither rose will appear where the mob dies. You can also use a dispenser with shears to collect the flower without hurting yourself.
-
-
How to download Minecraft 1.13.1 APK for free?
-
Now that you know what's new in Minecraft 1.13.1, you may be wondering how to download it for free on your Android device. Well, it's not as hard as you might think. Follow these simple steps and you'll be playing in no time:
-
Step 1: Check your device's compatibility and storage space
-
Before downloading anything, you need to make sure that your device meets the minimum requirements to run the game smoothly. According to the official website, you need at least:
-
-
An Android device running version 4.2 or higher
-
A processor with ARMv7 or x86 architecture
-
At least 1 GB of RAM
-
At least 300 MB of free storage space
-
-
-
Step 2: Choose a reliable source to download the APK file
-
An APK file is an Android application package that contains all the files and data needed to install an app on your device. You can download APK files from various sources on the Internet, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or unwanted ads that can harm your device or compromise your privacy.
-
To avoid these risks, you need to choose a reliable source that offers verified and safe APK files. One of the best sources we recommend is APKPure, a popular and reputable website that provides free and up-to-date APK files for various apps and games. You can access its website from any browser on your device or download its app for easier access.
-
Step 3: Enable unknown sources in your device settings
-
By default, your device only lets you install apps from the Google Play Store, which is the official app store for Android devices. However, if you want to install an APK file from another source, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Play Store.
-
To enable unknown sources, go to your device settings and look for the security or privacy option. Then find the option that says unknown sources, or allow installation of apps from unknown sources, and toggle it on. You may see a warning message informing you about the potential risks of installing apps from unknown sources. Tap OK or Continue.
-
Step 4: Install the APK file and launch the game
-
Now that you have enabled unknown sources, you can install the APK file on your device. To do this, go to the website or app where you downloaded the APK file and tap on it. You may see a pop-up window asking if you want to install this application. Tap Install and wait for the installation process to finish.
-
-
Congratulations! You have successfully downloaded and installed Minecraft 1.13.1 APK for free on your Android device. Now you can enjoy this amazing game with all its new features and updates.
-
How to play Minecraft 1.13.1?
-
If you are new to Minecraft or need a refresher on how to play it, here are some basic tips and instructions for playing Minecraft 1.13.1:
-
Choose a game mode: Survival, Creative, or Adventure
-
Minecraft has three main game modes that offer different experiences and challenges. You can choose the game mode that suits your preference and play style.
-
-
Survival mode: In this mode, you have to survive in a randomly generated world with limited resources and health. You have to gather materials, tools, and weapons, build shelters, and defend yourself from enemies. You also have to deal with hunger, thirst, and environmental hazards. You can adjust the difficulty level from Peaceful to Hard, or play in Hardcore mode where you only have one life.
-
Creative mode: In this mode, you have unlimited resources and health, and you can fly around the world. You can build whatever you want without restrictions or dangers. You can also use commands and cheats to modify the world and spawn items and entities. This mode is ideal for building, experimenting, and exploring.
-
Adventure mode: In this mode, you can play custom maps and scenarios created by other players or by yourself. You have to follow the rules and objectives set by the map creator, such as solving puzzles, completing quests, or fighting bosses. You can also use command blocks and data packs to add custom features and mechanics to the game.
-
-
Create or join a world: Single player or multiplayer
-
-
To create a world, you need to choose a name for your world, select a game mode, and customize options such as the seed, the world type, cheats, and the bonus chest. You can also use add-ons or resource packs to change the look and behavior of the game.
-
To join a world, you need to find a server that hosts the world you want to play in. You can join a public server that anyone can access, or a private server that requires an invitation or a password. You can also join a Realm, a subscription-based service that lets you create and join worlds that are always online.
-
Explore, build, and create: Use your imagination and skills
-
Once you are in a world, you can start playing by exploring, building, and creating. You can move around the world by walking, running, jumping, swimming, flying, or riding vehicles or animals. You can interact with the world by breaking and placing blocks, using items and tools, activating switches and levers, trading with villagers, and fighting enemies.
-
You can build whatever you want using blocks of different materials, shapes, colors, and properties. You can also use redstone circuits to create complex mechanisms such as doors, traps, elevators, and machines. You can also use commands and functions to create custom structures and effects.
-
You can craft various items and tools using a crafting table or your inventory grid. You need to arrange materials in specific patterns to create different products such as weapons, armor, and food, from where you downloaded it and install it over the existing one. You may need to uninstall the previous version first if you run into any problems.
-
Q: What are some other games like Minecraft that I can play on my Android device?
-
A: Some other games like Minecraft that you can play on your Android device are Terraria, Roblox, Stardew Valley, and Survivalcraft.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md b/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md
deleted file mode 100644
index dc6d7ac91ee3a6f0d067057147cf1308b2e2c1e4..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md
+++ /dev/null
@@ -1,116 +0,0 @@
-
-
El increíble Spider-Man 2 APK: Una revisión
-
Si usted es un fan de Spider-Man y sus increíbles aventuras, es posible que desee echa un vistazo a The Amazing Spider-Man 2 APK, un juego que le permite convertirse en el web-slinger sí mismo y salvar a Nueva York de una juerga de crimen en toda la ciudad. En este artículo, revisaremos este juego y te diremos por qué deberías descargarlo, qué características ofrece, cómo instalarlo en tu dispositivo Android y algunos consejos y trucos para disfrutarlo más. ¡Vamos a empezar!
El increíble Spider-Man 2 APK es un juego para Android que se basa en la película de Marvel del mismo nombre. Es desarrollado por Gameloft, una compañía líder en la industria de juegos móviles. El juego es una aventura llena de acción y mundo abierto que te permite columpiarte, disparar en la web, escalar paredes y luchar como Spider-Man en un entorno 3D realista. Puedes explorar seis distritos detallados de Manhattan, desde Times Square hasta Central Park, y enfrentarte a villanos famosos como Venom, el Duende Verde, Electro y Kraven el Cazador. También puedes desbloquear diferentes trajes de Spider-Man, como Symbiote Spider-Man, Iron Spider y Ultimate Comics Spider-Man, cada uno con sus propias habilidades y bonificaciones.
-
Why should you download it?
-
There are many reasons to download The Amazing Spider-Man 2 APK on your Android device. Here are some of them:
-
-
It is a fun and exciting game that will keep you entertained for hours.
-
It has high-quality graphics and animations that make you feel like you are in the movie.
-
It has a gripping story that expands on the movie's plot and introduces new characters and scenarios.
-
It has challenging gameplay that requires skill, strategy, and reflexes.
-
It has a social side that lets you compete with other players in Mysterio's Arena and share your achievements online.
-
-
-
An original story based on the movie
-
The Amazing Spider-Man 2 APK follows the events of the movie but adds new twists and turns to make it more interesting. You will meet new characters such as Black Cat and Screwball, who will help or hinder you in your quest to stop the crime wave, and you will learn more about the villains' origins and motives. The game has high-quality voice acting and cinematic cutscenes that immerse you in the story.
-
A stunning open-world 3D adventure
-
The Amazing Spider-Man 2 APK gives you the freedom to explore New York however you like. You can swing from building to building, climb walls, crawl across rooftops, and leap over obstacles. You can also interact with the environment by breaking windows, smashing cars, or saving civilians. The game has realistic physics and dynamic weather effects that bring the city to life, and you can enjoy beautiful views of the skyline, the bridges, and the landmarks.
-
-
A variety of Spider-Man suits and villains
-
The Amazing Spider-Man 2 APK lets you customize your Spider-Man with different suits that have different powers and bonuses. You can unlock them by completing missions, collecting items, or buying them with real money. Some of the suits are:
-
| Suit | Power | Bonus |
| --- | --- | --- |
| Symbiote Spider-Man | Venom Blast: stuns enemies and deals extra damage | Increases health regeneration |
| Iron Spider | Iron Arms: summons four mechanical arms that attack enemies | Increases attack power and defense |
| Ultimate Comics Spider-Man | Cloaking: turns invisible and undetectable to enemies | Increases stealth and agility |
| Spider-Man 2099 | Accelerated Vision: slows down time and sharpens perception | Increases speed and reflexes |
| Scarlet Spider | Nanobots: heal wounds and restore health | Increases healing and endurance |
| Spider-Armor MK II | Bulletproof: absorbs bullets and reflects them back at enemies | Increases armor and protection |
| The Amazing Spider-Man (2014) | No special power | No special bonus |
-
Note: some suits require in-app purchases to unlock.
-
-
The game also features a wide range of villains that you will have to face in different missions and boss battles. Some of the villains are:
-
-
Venom: a monstrous creature created when an alien symbiote bonds with a human host. He has super strength, agility, and durability, and can shoot webs and tendrils from his body. He is one of Spider-Man's most dangerous enemies.
-
The Green Goblin: the alter ego of Norman Osborn, a ruthless businessman who experimented on himself with a serum that gave him enhanced abilities but also drove him insane. He uses a glider, pumpkin bombs, and razor bats to attack Spider-Man.
-
Electro: a former electrical engineer who was transformed into a living battery after an accident. He can manipulate electricity, shoot lightning bolts, and fly using magnetic fields. He is obsessed with becoming more powerful and destroying Spider-Man.
-
Kraven the Hunter: a skilled hunter and tracker who considers Spider-Man his ultimate prey. He uses various weapons, such as knives, spears, nets, and traps, to hunt his targets, and he is enhanced by a mystical potion that gives him heightened senses, speed, and stamina.
-
The Kingpin: New York's crime lord, who controls most of the illegal activity in the city. He is a mastermind who uses his wealth, influence, and brute force to achieve his goals, and a formidable fighter who can match Spider-Man in strength and durability.
-
-
Black Cat: a cat burglar who has a complicated relationship with Spider-Man. She is an expert thief who uses her agility, acrobatics, gadgets, and luck to steal valuable items, and she can flirt with Spider-Man to distract him from his missions.
-
Screwball: a prankster who livestreams her crimes for her fans. She uses devices such as drones, holograms, bombs, and traps to create chaos and challenge Spider-Man, and she is agile enough to evade his attacks.
-
-
An exciting combat system and aerial action
-
The Amazing Spider-Man 2 APK has a combat system that is fast-paced, fluid, and responsive. You can use your webs to swing, zip, pull, or wrap up your enemies, and your fists, kicks, or web-shooters to fight them. You can perform combos, counters, dodges, finishers, and special moves to defeat your enemies, and you can use the environment to your advantage by throwing objects, breaking walls, or triggering explosions.
-
The game also has an aerial-action system that lets you fly through the sky as Spider-Man. You can use your webs to swing from building to building or glide using your web wings, perform acrobatic maneuvers such as flips, spins, dives, and rolls, and engage in aerial combat with enemies that fly or shoot at you.
-
How to download and install The Amazing Spider-Man 2 APK
-
Requirements and compatibility
-
The Amazing Spider-Man 2 APK is a large game that needs a lot of space and resources on your Android device. These are the minimum requirements for the game (a quick, optional way to read a few of these from a connected device is sketched after the list):
-
-
Android version: 4.0.3 or higher
-
RAM: 1 GB or more
-
Storage: 1.5 GB or more
-
Processor: 1 GHz or faster
-
Screen resolution: 800 x 480 or higher
-
Internet connection: required for some features and updates
-
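As a rough illustration, here is a minimal Python sketch for reading a few of these properties from a computer, assuming the Android platform tools (`adb`) are installed and USB debugging is enabled on the device; it is only a convenience, not something the guide requires.

```python
# Minimal sketch (assumptions: `adb` is on PATH, USB debugging is enabled).
# Reads a few of the properties listed above from the connected device.
import subprocess

def adb_shell(command: str) -> str:
    """Run a shell command on the connected device and return its trimmed output."""
    result = subprocess.run(["adb", "shell", command],
                            capture_output=True, text=True, check=True)
    return result.stdout.strip()

print("Android version:", adb_shell("getprop ro.build.version.release"))

meminfo = adb_shell("cat /proc/meminfo")
print(next(line for line in meminfo.splitlines() if line.startswith("MemTotal")))

print(adb_shell("df /sdcard"))   # free space on shared storage
```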
-
-
Steps to download and install
-
To download and install The Amazing Spider-Man 2 APK on your Android device, follow these steps (an illustrative sketch of the OBB step follows the list):
-
-
Download the APK file and the OBB data file from a trusted source such as APKPure or APKMirror. Make sure you download the files that match your device's specifications and region.
-
Enable installing apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn it on.
-
Locate the downloaded APK file and tap it to start the installation. Follow the on-screen instructions to complete it.
-
Extract the OBB data file using a file manager app such as ES File Explorer or ZArchiver. You should end up with a folder named com.gameloft.android.ANMP.GloftASHM.
-
Move the folder to the Android/OBB directory in your device's internal storage. If you do not have an OBB folder, create one.
-
Launch the game from the app drawer and enjoy!
-
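Purely as an illustration of steps 4 and 5, here is a small Python sketch, assuming the OBB data was downloaded as a ZIP archive (the archive name below is a hypothetical example); a file manager app as described above works just as well.

```python
# Illustrative sketch of steps 4-5 (the archive name is a hypothetical example).
# It unpacks a downloaded OBB archive into the folder the game expects:
#   <storage>/Android/obb/com.gameloft.android.ANMP.GloftASHM/
import zipfile
from pathlib import Path

storage = Path("/sdcard")                                   # shared storage root on most devices
archive_path = storage / "Download" / "spiderman2_obb.zip"  # hypothetical downloaded archive
package = "com.gameloft.android.ANMP.GloftASHM"
target = storage / "Android" / "obb" / package

target.mkdir(parents=True, exist_ok=True)                   # create Android/obb/<package> if missing
with zipfile.ZipFile(archive_path) as archive:
    archive.extractall(target)                              # the .obb file(s) end up inside `target`

print("OBB data in place:", sorted(p.name for p in target.iterdir()))
```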
-
Tips and tricks for enjoying the game
-
The Amazing Spider-Man 2 APK is a fun and challenging game that will test your skills and reflexes as Spider-Man. Here are some tips and tricks to help you enjoy it more:
-
-
Upgrade your Spider-Man suits and web-shooters regularly to improve your abilities and performance.
-
Collect spider tokens, vials, comic books, and other items to unlock new suits, gadgets, skills, and bonuses.
-
Complete side missions, such as saving civilians, stopping crimes, or gathering evidence, to earn extra rewards and reputation.
-
Use your spider-sense to detect enemies, hazards, and opportunities around you.
-
Use your webs to swing faster, avoid obstacles, and reach high places.
-
-
Use your environment to your advantage by throwing objects, breaking walls, or triggering explosions.
-
Watch out for quick-time events that require you to tap or swipe the screen at the right moment.
-
Complete achievements and challenges to earn more spider tokens and vials.
-
Compete with other players in Mysterio's Arena and climb the leaderboards.
-
-
Conclusion
-
Summary of the main points
-
The Amazing Spider-Man 2 APK is an Android game that lets you become Spider-Man and save New York from a city-wide crime spree. It is based on the Marvel movie of the same name, but it also tells an original story that introduces new characters and scenarios. It has high-quality graphics and animations that make you feel like you are in the movie, and a variety of features that make it fun and exciting: different Spider-Man suits and villains, an open-world 3D adventure, a thrilling combat and aerial-action system, a social side that lets you compete with other players online, and more. It is easy to download and install on your Android device as long as you meet the requirements, and the tips and tricks above will help you enjoy the game even more.
-
Call to action and final thoughts
-
-
Here are some frequently asked questions about The Amazing Spider-Man 2 APK:
-
-
Is The Amazing Spider-Man 2 APK safe to download and install?
-
Yes, The Amazing Spider-Man 2 APK is safe to download and install as long as you get it from a trusted source such as APKPure or APKMirror. These sources scan files for viruses and malware before publishing them. Even so, you should always be careful when downloading and installing apps from unknown sources, as they may contain harmful or unwanted content.
-
Is The Amazing Spider-Man 2 APK free to play?
-
The Amazing Spider-Man 2 APK is free to download and play, but it also offers in-app purchases that let you buy additional items such as spider tokens, vials, suits, or gadgets. These purchases are optional and are not required to enjoy the game; you can also earn these items by playing and completing missions.
-
How do I update The Amazing Spider-Man 2 APK?
-
The Amazing Spider-Man 2 APK is updated regularly by the developers to fix bugs, improve performance, and add new features. You can update the game by downloading and installing the latest version from the same source where you got the original, or check for updates in-game under Settings > About > Check for Updates.
-
How do I uninstall The Amazing Spider-Man 2 APK?
-
If you want to uninstall The Amazing Spider-Man 2 APK from your Android device, follow these steps:
-
-
Go to Settings > Apps > The Amazing Spider-Man 2.
-
Tap Uninstall and confirm your choice.
-
Delete the com.gameloft.android.ANMP.GloftASHM folder from your Android/OBB directory.
-
-
How do I contact the developers of The Amazing Spider-Man 2 APK?
-
If you have any questions, feedback, or issues regarding The Amazing Spider-Man 2 APK, you can contact the developers using one of these methods:
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md b/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md
deleted file mode 100644
index 40380296c3dbc17d502f4fe58afb3c55573f7da7..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
How to Download Yu-Gi-Oh Duel Links on a Laptop
-
Do you love playing Yu-Gi-Oh Duel Links on your mobile device, but wish you could enjoy it on a bigger screen with better controls? If so, you are in luck, because in this article we will show you how to download Yu-Gi-Oh Duel Links on a laptop and play it like a pro. Whether you have Windows 11, Windows 10, or an older version of Windows, we have a solution for you. Let's get started!
Yu-Gi-Oh Duel Links is a popular online multiplayer card game based on the Yu-Gi-Oh anime and manga series. It lets you build your own deck from hundreds of characters and monsters and duel players from all over the world. You can also challenge legendary duelists from the anime, such as Yugi, Kaiba, Joey, Mai, and more. The game features impressive 3D animations, voice acting, and intuitive controls that make it easy to learn and fun to play.
-
Why play Yu-Gi-Oh Duel Links on a laptop?
-
While Yu-Gi-Oh Duel Links is designed for mobile devices, there are many reasons why you might want to play it on your laptop instead. Here are some of them:
-
-
You can enjoy the game's graphics and animations on a bigger, higher-resolution screen.
-
You can use the keyboard and mouse to control the game with more precision and comfort.
-
You can save battery life and data usage by playing over Wi-Fi instead of mobile data.
-
You can avoid interruptions from phone calls, messages, notifications, or low-battery alerts.
-
You can access features and options that may not be available in the mobile version.
-
-
So, how do you download Yu-Gi-Oh Duel Links on a laptop? There are different methods depending on which version of Windows you have. Let's look at each one.
-
-
How to download Yu-Gi-Oh Duel Links on a laptop with Windows 11
-
-
-
Make sure your Windows 11 PC has hardware virtualization enabled. You can check this under Task Manager > Performance; if it is disabled, you may need to enable it in your BIOS settings.
-
Make sure your Windows 11 PC is updated to the latest version. You can check this under Settings > Update & Security > Windows Update.
-
Download and install the Amazon Appstore app on your Windows 11 PC.
-
Launch the Amazon Appstore app and sign in with your Amazon account. If you do not have one, you can create one for free.
-
Search for "Yu-Gi-Oh Duel Links" or click this link to go to the game's page.
-
Click the "Get" button and wait for the game to download and install.
-
Launch the game from the Amazon Appstore app or from the Start menu.
-
Enjoy playing Yu-Gi-Oh Duel Links on your laptop!
-
-
That's it! You can now play Yu-Gi-Oh Duel Links on your Windows 11 laptop using the Windows Subsystem for Android and the Amazon Appstore. This method is quick, easy, and safe, and it does not require any third-party software or configuration (a small, optional sketch for checking that the Android subsystem is present follows below). If you have Windows 10 or an older version of Windows, however, you will need to use a different method.
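As a rough, optional illustration, the following Python sketch asks PowerShell whether the Windows Subsystem for Android package is installed; the package name used here is an assumption and may differ between Windows builds.

```python
# Illustrative sketch (assumption: the subsystem package is named
# "MicrosoftCorporationII.WindowsSubsystemForAndroid"; run on Windows 11).
import subprocess

result = subprocess.run(
    ["powershell", "-NoProfile", "-Command",
     "Get-AppxPackage -Name MicrosoftCorporationII.WindowsSubsystemForAndroid"],
    capture_output=True, text=True,
)

if result.stdout.strip():
    print("Windows Subsystem for Android appears to be installed.")
else:
    print("WSA not found - installing the Amazon Appstore from the Microsoft Store sets it up.")
```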
-
How to download Yu-Gi-Oh Duel Links on a laptop with Windows 10 or older
-
If you have Windows 10 or an older version of Windows, you can still download Yu-Gi-Oh Duel Links on your laptop using an Android emulator. An Android emulator is software that simulates an Android device on your PC, allowing you to run Android apps and games. There are many Android emulators available, but one of the most popular and reliable is BlueStacks. Here is how to use BlueStacks to download Yu-Gi-Oh Duel Links on your laptop:
This is how you can download Yu-Gi-Oh Duel Links on your laptop with Windows 10 or older using BlueStacks. The method is simple and convenient, but it does use system resources and storage space, and you may need to adjust a few settings to optimize your gaming experience and performance. If you want to try another approach, you can also sideload the Yu-Gi-Oh Duel Links APK onto your laptop.
-
How to sideload the Yu-Gi-Oh Duel Links APK onto a laptop
-
Sideloading means transferring and installing an app from a source other than the official app store. In this case, you can sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device. This method can be useful if you have trouble accessing the Google Play Store or the Amazon Appstore, but it also carries some risk, such as malware infection or compatibility issues. We therefore recommend that you only take APKs from trusted sources and scan them with antivirus software before installing them. Here is how to sideload the Yu-Gi-Oh Duel Links APK onto your laptop:
-
-
On your Android device, go to Settings > Apps & notifications > Yu-Gi-Oh Duel Links > Storage & cache > Clear cache. This ensures you have the latest version of the game without any corrupted data.
-
-
On your Android device, open a file explorer app and locate the Yu-Gi-Oh Duel Links APK file. The file name should be something like "com.konami.duellinks.apk". You can find it in internal storage or on the SD card under Android > data > com.konami.duellinks > files > download.
-
Transfer the APK file to your laptop via USB, Bluetooth, Wi-Fi, email, or any other method you prefer.
-
On your laptop, go to the folder where you saved the APK file and double-click it to install it. You may need to allow installing apps from unknown sources on your laptop; you can do this under Settings > Apps > Apps & features > Choose where to get apps, then select Anywhere.
-
Launch the game from the Start menu or from the desktop shortcut.
-
Enjoy playing Yu-Gi-Oh Duel Links on your laptop!
-
-
This is how you can sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device. The method is flexible and self-contained, but it can also be risky and fiddly: you may have to update the APK manually whenever a new version of the game is released, and you may run into bugs that affect your gaming experience and performance. We therefore suggest using it only as a last resort. A minimal sketch of the transfer step is shown below.
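As a hedged illustration of the transfer step only, here is a minimal Python sketch that copies the APK from the device to the laptop with `adb pull`; it assumes the Android platform tools are installed on the laptop, USB debugging is enabled, and the on-device path and file name match what the article describes (they may differ in practice).

```python
# Illustrative sketch (assumptions: `adb` is installed on the laptop, USB debugging
# is enabled, and the APK sits at the path the article mentions - this can vary).
import subprocess
from pathlib import Path

device_apk = "/sdcard/Android/data/com.konami.duellinks/files/download/com.konami.duellinks.apk"
local_dir = Path.home() / "Downloads"
local_dir.mkdir(exist_ok=True)

# Copy the file from the phone to the laptop over USB.
subprocess.run(["adb", "pull", device_apk, str(local_dir)], check=True)
print("APK saved to:", local_dir / "com.konami.duellinks.apk")
```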
-
Tips and tricks for playing Yu-Gi-Oh Duel Links on a laptop
-
Now that you know how to download Yu-Gi-Oh Duel Links on your laptop, you may want some tips and tricks to get the most out of your gaming experience and performance. Here are some of them:
-
-
Adjust the game settings to suit your laptop's specifications and your preferences. You can open the settings by tapping the gear icon in the upper-right corner of the screen and change the graphics quality, sound volume, voice language, notifications, and more.
-
-
Use mouse gestures to perform actions faster and more easily. You can view and customize them by clicking the mouse icon in the lower-right corner of the screen and assign gestures to actions such as swiping, tapping, and dragging.
-
Sync your game progress across devices with your Konami ID or Google Play Games account. You can do this by tapping the data transfer icon in the upper-left corner of the screen, and you can also back up and restore your game data with this feature.
-
Join a duel room, or create your own, to play with friends or other players online. You can do this by tapping the duel room icon in the lower-left corner of the screen, where you can also chat with other players, send friend requests, and trade cards.
-
-
These are some of the tips and tricks that can help you play Yu-Gi-Oh Duel Links on your laptop more smoothly and enjoyably. There is, of course, much more to discover and learn as you play, so don't be afraid to experiment and explore!
-
Conclusion
-
In conclusion, Yu-Gi-Oh Duel Links is a fantastic card game that you can play on your laptop using different methods depending on your version of Windows: the Windows Subsystem for Android and the Amazon Appstore on Windows 11, or an Android emulator such as BlueStacks on Windows 10 or older. You can also sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device if you want to try another option. Whichever method you choose, follow the tips and tricks above to optimize your gaming experience and performance.
-
So what are you waiting for? Download Yu-Gi-Oh Duel Links on your laptop today and unleash your dueling skills! And don't forget to share this article with friends who might also be interested in playing Yu-Gi-Oh Duel Links on their laptops!
-
Frequently asked questions
-
-
-
Is Yu-Gi-Oh Duel Links free to play? Yes, Yu-Gi-Oh Duel Links is free to play with in-app purchases. You can download and play the game without spending any money, but you can also buy gems, cards, packs, and other items with real money if you want to enhance your experience.
-
Is Yu-Gi-Oh Duel Links safe to download? Yes, Yu-Gi-Oh Duel Links is safe to download as long as you get it from a trusted source, such as the Google Play Store, the Amazon Appstore, or the official BlueStacks website. If you sideload the Yu-Gi-Oh Duel Links APK from an unknown source, you should scan it with antivirus software before installing it and watch out for malware or compatibility issues.
-
Can I play Yu-Gi-Oh Duel Links offline? No, Yu-Gi-Oh Duel Links requires an Internet connection to play. You need to be online to access the game's features, such as duels, events, and updates. However, you can play over Wi-Fi or Ethernet instead of mobile data if you want to save on data usage and battery life.
-
Can I play Yu-Gi-Oh Duel Links with a controller? Yes, you can play Yu-Gi-Oh Duel Links with a controller if you use an Android emulator such as BlueStacks. You can connect the controller to your laptop via USB or Bluetooth and map its buttons to in-game actions, or use a gamepad app on your Android device to control the game on your laptop.
-
Can I transfer my Yu-Gi-Oh Duel Links account from my mobile device to my laptop? Yes, you can transfer your account using your Konami ID or Google Play Games account. Tap the data transfer icon in the upper-left corner of the screen and follow the instructions; you can also back up and restore your game data with this feature.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py
deleted file mode 100644
index fe61e8116b71e073351939ed7a499ee752398f1c..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import itertools
-import logging
-import os
-import posixpath
-import urllib.parse
-from typing import List
-
-from pip._vendor.packaging.utils import canonicalize_name
-
-from pip._internal.models.index import PyPI
-from pip._internal.utils.compat import has_tls
-from pip._internal.utils.misc import normalize_path, redact_auth_from_url
-
-logger = logging.getLogger(__name__)
-
-
-class SearchScope:
-
- """
- Encapsulates the locations that pip is configured to search.
- """
-
- __slots__ = ["find_links", "index_urls", "no_index"]
-
- @classmethod
- def create(
- cls,
- find_links: List[str],
- index_urls: List[str],
- no_index: bool,
- ) -> "SearchScope":
- """
- Create a SearchScope object after normalizing the `find_links`.
- """
- # Build find_links. If an argument starts with ~, it may be
- # a local file relative to a home directory. So try normalizing
- # it and if it exists, use the normalized version.
- # This is deliberately conservative - it might be fine just to
- # blindly normalize anything starting with a ~...
- built_find_links: List[str] = []
- for link in find_links:
- if link.startswith("~"):
- new_link = normalize_path(link)
- if os.path.exists(new_link):
- link = new_link
- built_find_links.append(link)
-
- # If we don't have TLS enabled, then WARN if anyplace we're looking
- # relies on TLS.
- if not has_tls():
- for link in itertools.chain(index_urls, built_find_links):
- parsed = urllib.parse.urlparse(link)
- if parsed.scheme == "https":
- logger.warning(
- "pip is configured with locations that require "
- "TLS/SSL, however the ssl module in Python is not "
- "available."
- )
- break
-
- return cls(
- find_links=built_find_links,
- index_urls=index_urls,
- no_index=no_index,
- )
-
- def __init__(
- self,
- find_links: List[str],
- index_urls: List[str],
- no_index: bool,
- ) -> None:
- self.find_links = find_links
- self.index_urls = index_urls
- self.no_index = no_index
-
- def get_formatted_locations(self) -> str:
- lines = []
- redacted_index_urls = []
- if self.index_urls and self.index_urls != [PyPI.simple_url]:
- for url in self.index_urls:
- redacted_index_url = redact_auth_from_url(url)
-
- # Parse the URL
- purl = urllib.parse.urlsplit(redacted_index_url)
-
- # URL is generally invalid if scheme and netloc is missing
- # there are issues with Python and URL parsing, so this test
- # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
- # always parse invalid URLs correctly - it should raise
- # exceptions for malformed URLs
- if not purl.scheme and not purl.netloc:
- logger.warning(
- 'The index url "%s" seems invalid, please provide a scheme.',
- redacted_index_url,
- )
-
- redacted_index_urls.append(redacted_index_url)
-
- lines.append(
- "Looking in indexes: {}".format(", ".join(redacted_index_urls))
- )
-
- if self.find_links:
- lines.append(
- "Looking in links: {}".format(
- ", ".join(redact_auth_from_url(url) for url in self.find_links)
- )
- )
- return "\n".join(lines)
-
- def get_index_urls_locations(self, project_name: str) -> List[str]:
- """Returns the locations found via self.index_urls
-
- Checks the url_name on the main (first in the list) index and
- use this url_name to produce all locations
- """
-
- def mkurl_pypi_url(url: str) -> str:
- loc = posixpath.join(
- url, urllib.parse.quote(canonicalize_name(project_name))
- )
- # For maximum compatibility with easy_install, ensure the path
- # ends in a trailing slash. Although this isn't in the spec
- # (and PyPI can handle it without the slash) some other index
- # implementations might break if they relied on easy_install's
- # behavior.
- if not loc.endswith("/"):
- loc = loc + "/"
- return loc
-
- return [mkurl_pypi_url(url) for url in self.index_urls]
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py
deleted file mode 100644
index a5dc12bdd63163c86f87ce4b5430cdb16d73769d..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py
+++ /dev/null
@@ -1,92 +0,0 @@
-"""Represents a wheel file and provides access to the various parts of the
-name that have meaning.
-"""
-import re
-from typing import Dict, Iterable, List
-
-from pip._vendor.packaging.tags import Tag
-
-from pip._internal.exceptions import InvalidWheelFilename
-
-
-class Wheel:
- """A wheel file"""
-
- wheel_file_re = re.compile(
- r"""^(?P(?P[^\s-]+?)-(?P[^\s-]*?))
- ((-(?P\d[^-]*?))?-(?P[^\s-]+?)-(?P[^\s-]+?)-(?P[^\s-]+?)
- \.whl|\.dist-info)$""",
- re.VERBOSE,
- )
-
- def __init__(self, filename: str) -> None:
- """
- :raises InvalidWheelFilename: when the filename is invalid for a wheel
- """
- wheel_info = self.wheel_file_re.match(filename)
- if not wheel_info:
- raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
- self.filename = filename
- self.name = wheel_info.group("name").replace("_", "-")
- # we'll assume "_" means "-" due to wheel naming scheme
- # (https://github.com/pypa/pip/issues/1150)
- self.version = wheel_info.group("ver").replace("_", "-")
- self.build_tag = wheel_info.group("build")
- self.pyversions = wheel_info.group("pyver").split(".")
- self.abis = wheel_info.group("abi").split(".")
- self.plats = wheel_info.group("plat").split(".")
-
- # All the tag combinations from this file
- self.file_tags = {
- Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
- }
-
- def get_formatted_file_tags(self) -> List[str]:
- """Return the wheel's tags as a sorted list of strings."""
- return sorted(str(tag) for tag in self.file_tags)
-
- def support_index_min(self, tags: List[Tag]) -> int:
- """Return the lowest index that one of the wheel's file_tag combinations
- achieves in the given list of supported tags.
-
- For example, if there are 8 supported tags and one of the file tags
- is first in the list, then return 0.
-
- :param tags: the PEP 425 tags to check the wheel against, in order
- with most preferred first.
-
- :raises ValueError: If none of the wheel's file tags match one of
- the supported tags.
- """
- try:
- return next(i for i, t in enumerate(tags) if t in self.file_tags)
- except StopIteration:
- raise ValueError()
-
- def find_most_preferred_tag(
- self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
- ) -> int:
- """Return the priority of the most preferred tag that one of the wheel's file
- tag combinations achieves in the given list of supported tags using the given
- tag_to_priority mapping, where lower priorities are more-preferred.
-
- This is used in place of support_index_min in some cases in order to avoid
- an expensive linear scan of a large list of tags.
-
- :param tags: the PEP 425 tags to check the wheel against.
- :param tag_to_priority: a mapping from tag to priority of that tag, where
- lower is more preferred.
-
- :raises ValueError: If none of the wheel's file tags match one of
- the supported tags.
- """
- return min(
- tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
- )
-
- def supported(self, tags: Iterable[Tag]) -> bool:
- """Return whether the wheel is compatible with one of the given tags.
-
- :param tags: the PEP 425 tags to check the wheel against.
- """
- return not self.file_tags.isdisjoint(tags)
diff --git a/spaces/BlitzEsports/TextToImage/html2canvas.js b/spaces/BlitzEsports/TextToImage/html2canvas.js
deleted file mode 100644
index 96e2dc5707b1a584ff7b3b583aea7c6c18d4ea76..0000000000000000000000000000000000000000
--- a/spaces/BlitzEsports/TextToImage/html2canvas.js
+++ /dev/null
@@ -1,7756 +0,0 @@
-/*!
- * html2canvas 1.4.1
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
-(function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
- typeof define === 'function' && define.amd ? define(factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.html2canvas = factory());
-}(this, (function () { 'use strict';
-
- /*! *****************************************************************************
- Copyright (c) Microsoft Corporation.
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- PERFORMANCE OF THIS SOFTWARE.
- ***************************************************************************** */
- /* global Reflect, Promise */
-
- var extendStatics = function(d, b) {
- extendStatics = Object.setPrototypeOf ||
- ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
- function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
- return extendStatics(d, b);
- };
-
- function __extends(d, b) {
- if (typeof b !== "function" && b !== null)
- throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
- extendStatics(d, b);
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
- }
-
- var __assign = function() {
- __assign = Object.assign || function __assign(t) {
- for (var s, i = 1, n = arguments.length; i < n; i++) {
- s = arguments[i];
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
- }
- return t;
- };
- return __assign.apply(this, arguments);
- };
-
- function __awaiter(thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- }
-
- function __generator(thisArg, body) {
- var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
- return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
- function verb(n) { return function (v) { return step([n, v]); }; }
- function step(op) {
- if (f) throw new TypeError("Generator is already executing.");
- while (_) try {
- if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
- if (y = 0, t) op = [op[0] & 2, t.value];
- switch (op[0]) {
- case 0: case 1: t = op; break;
- case 4: _.label++; return { value: op[1], done: false };
- case 5: _.label++; y = op[1]; op = [0]; continue;
- case 7: op = _.ops.pop(); _.trys.pop(); continue;
- default:
- if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
- if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
- if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
- if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
- if (t[2]) _.ops.pop();
- _.trys.pop(); continue;
- }
- op = body.call(thisArg, _);
- } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
- if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
- }
- }
-
- function __spreadArray(to, from, pack) {
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
- if (ar || !(i in from)) {
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
- ar[i] = from[i];
- }
- }
- return to.concat(ar || from);
- }
-
- var Bounds = /** @class */ (function () {
- function Bounds(left, top, width, height) {
- this.left = left;
- this.top = top;
- this.width = width;
- this.height = height;
- }
- Bounds.prototype.add = function (x, y, w, h) {
- return new Bounds(this.left + x, this.top + y, this.width + w, this.height + h);
- };
- Bounds.fromClientRect = function (context, clientRect) {
- return new Bounds(clientRect.left + context.windowBounds.left, clientRect.top + context.windowBounds.top, clientRect.width, clientRect.height);
- };
- Bounds.fromDOMRectList = function (context, domRectList) {
- var domRect = Array.from(domRectList).find(function (rect) { return rect.width !== 0; });
- return domRect
- ? new Bounds(domRect.left + context.windowBounds.left, domRect.top + context.windowBounds.top, domRect.width, domRect.height)
- : Bounds.EMPTY;
- };
- Bounds.EMPTY = new Bounds(0, 0, 0, 0);
- return Bounds;
- }());
- var parseBounds = function (context, node) {
- return Bounds.fromClientRect(context, node.getBoundingClientRect());
- };
- var parseDocumentSize = function (document) {
- var body = document.body;
- var documentElement = document.documentElement;
- if (!body || !documentElement) {
- throw new Error("Unable to get document size");
- }
- var width = Math.max(Math.max(body.scrollWidth, documentElement.scrollWidth), Math.max(body.offsetWidth, documentElement.offsetWidth), Math.max(body.clientWidth, documentElement.clientWidth));
- var height = Math.max(Math.max(body.scrollHeight, documentElement.scrollHeight), Math.max(body.offsetHeight, documentElement.offsetHeight), Math.max(body.clientHeight, documentElement.clientHeight));
- return new Bounds(0, 0, width, height);
- };
-
- /*
- * css-line-break 2.1.0
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var toCodePoints$1 = function (str) {
- var codePoints = [];
- var i = 0;
- var length = str.length;
- while (i < length) {
- var value = str.charCodeAt(i++);
- if (value >= 0xd800 && value <= 0xdbff && i < length) {
- var extra = str.charCodeAt(i++);
- if ((extra & 0xfc00) === 0xdc00) {
- codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000);
- }
- else {
- codePoints.push(value);
- i--;
- }
- }
- else {
- codePoints.push(value);
- }
- }
- return codePoints;
- };
- var fromCodePoint$1 = function () {
- var codePoints = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- codePoints[_i] = arguments[_i];
- }
- if (String.fromCodePoint) {
- return String.fromCodePoint.apply(String, codePoints);
- }
- var length = codePoints.length;
- if (!length) {
- return '';
- }
- var codeUnits = [];
- var index = -1;
- var result = '';
- while (++index < length) {
- var codePoint = codePoints[index];
- if (codePoint <= 0xffff) {
- codeUnits.push(codePoint);
- }
- else {
- codePoint -= 0x10000;
- codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00);
- }
- if (index + 1 === length || codeUnits.length > 0x4000) {
- result += String.fromCharCode.apply(String, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- };
- var chars$2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$2 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$2 = 0; i$2 < chars$2.length; i$2++) {
- lookup$2[chars$2.charCodeAt(i$2)] = i$2;
- }
-
- /*
- * utrie 1.0.2
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var chars$1$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$1$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$1$1 = 0; i$1$1 < chars$1$1.length; i$1$1++) {
- lookup$1$1[chars$1$1.charCodeAt(i$1$1)] = i$1$1;
- }
- var decode$1 = function (base64) {
- var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4;
- if (base64[base64.length - 1] === '=') {
- bufferLength--;
- if (base64[base64.length - 2] === '=') {
- bufferLength--;
- }
- }
- var buffer = typeof ArrayBuffer !== 'undefined' &&
- typeof Uint8Array !== 'undefined' &&
- typeof Uint8Array.prototype.slice !== 'undefined'
- ? new ArrayBuffer(bufferLength)
- : new Array(bufferLength);
- var bytes = Array.isArray(buffer) ? buffer : new Uint8Array(buffer);
- for (i = 0; i < len; i += 4) {
- encoded1 = lookup$1$1[base64.charCodeAt(i)];
- encoded2 = lookup$1$1[base64.charCodeAt(i + 1)];
- encoded3 = lookup$1$1[base64.charCodeAt(i + 2)];
- encoded4 = lookup$1$1[base64.charCodeAt(i + 3)];
- bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);
- bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);
- bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);
- }
- return buffer;
- };
- var polyUint16Array$1 = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 2) {
- bytes.push((buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
- var polyUint32Array$1 = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 4) {
- bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
-
- /** Shift size for getting the index-2 table offset. */
- var UTRIE2_SHIFT_2$1 = 5;
- /** Shift size for getting the index-1 table offset. */
- var UTRIE2_SHIFT_1$1 = 6 + 5;
- /**
- * Shift size for shifting left the index array values.
- * Increases possible data size with 16-bit index values at the cost
- * of compactability.
- * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY.
- */
- var UTRIE2_INDEX_SHIFT$1 = 2;
- /**
- * Difference between the two shift sizes,
- * for getting an index-1 offset from an index-2 offset. 6=11-5
- */
- var UTRIE2_SHIFT_1_2$1 = UTRIE2_SHIFT_1$1 - UTRIE2_SHIFT_2$1;
- /**
- * The part of the index-2 table for U+D800..U+DBFF stores values for
- * lead surrogate code _units_ not code _points_.
- * Values for lead surrogate code _points_ are indexed with this portion of the table.
- * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.)
- */
- var UTRIE2_LSCP_INDEX_2_OFFSET$1 = 0x10000 >> UTRIE2_SHIFT_2$1;
- /** Number of entries in a data block. 32=0x20 */
- var UTRIE2_DATA_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_2$1;
- /** Mask for getting the lower bits for the in-data-block offset. */
- var UTRIE2_DATA_MASK$1 = UTRIE2_DATA_BLOCK_LENGTH$1 - 1;
- var UTRIE2_LSCP_INDEX_2_LENGTH$1 = 0x400 >> UTRIE2_SHIFT_2$1;
- /** Count the lengths of both BMP pieces. 2080=0x820 */
- var UTRIE2_INDEX_2_BMP_LENGTH$1 = UTRIE2_LSCP_INDEX_2_OFFSET$1 + UTRIE2_LSCP_INDEX_2_LENGTH$1;
- /**
- * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
- * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2.
- */
- var UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 = UTRIE2_INDEX_2_BMP_LENGTH$1;
- var UTRIE2_UTF8_2B_INDEX_2_LENGTH$1 = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */
- /**
- * The index-1 table, only used for supplementary code points, at offset 2112=0x840.
- * Variable length, for code points up to highStart, where the last single-value range starts.
- * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1.
- * (For 0x100000 supplementary code points U+10000..U+10ffff.)
- *
- * The part of the index-2 table for supplementary code points starts
- * after this index-1 table.
- *
- * Both the index-1 table and the following part of the index-2 table
- * are omitted completely if there is only BMP data.
- */
- var UTRIE2_INDEX_1_OFFSET$1 = UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 + UTRIE2_UTF8_2B_INDEX_2_LENGTH$1;
- /**
- * Number of index-1 entries for the BMP. 32=0x20
- * This part of the index-1 table is omitted from the serialized form.
- */
- var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 = 0x10000 >> UTRIE2_SHIFT_1$1;
- /** Number of entries in an index-2 block. 64=0x40 */
- var UTRIE2_INDEX_2_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_1_2$1;
- /** Mask for getting the lower bits for the in-index-2-block offset. */
- var UTRIE2_INDEX_2_MASK$1 = UTRIE2_INDEX_2_BLOCK_LENGTH$1 - 1;
- var slice16$1 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint16Array(Array.prototype.slice.call(view, start, end));
- };
- var slice32$1 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint32Array(Array.prototype.slice.call(view, start, end));
- };
- var createTrieFromBase64$1 = function (base64, _byteLength) {
- var buffer = decode$1(base64);
- var view32 = Array.isArray(buffer) ? polyUint32Array$1(buffer) : new Uint32Array(buffer);
- var view16 = Array.isArray(buffer) ? polyUint16Array$1(buffer) : new Uint16Array(buffer);
- var headerLength = 24;
- var index = slice16$1(view16, headerLength / 2, view32[4] / 2);
- var data = view32[5] === 2
- ? slice16$1(view16, (headerLength + view32[4]) / 2)
- : slice32$1(view32, Math.ceil((headerLength + view32[4]) / 4));
- return new Trie$1(view32[0], view32[1], view32[2], view32[3], index, data);
- };
- var Trie$1 = /** @class */ (function () {
- function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) {
- this.initialValue = initialValue;
- this.errorValue = errorValue;
- this.highStart = highStart;
- this.highValueIndex = highValueIndex;
- this.index = index;
- this.data = data;
- }
- /**
- * Get the value for a code point as stored in the Trie.
- *
- * @param codePoint the code point
- * @return the value
- */
- Trie.prototype.get = function (codePoint) {
- var ix;
- if (codePoint >= 0) {
- if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) {
- // Ordinary BMP code point, excluding leading surrogates.
- // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index.
- // 16 bit data is stored in the index array itself.
- ix = this.index[codePoint >> UTRIE2_SHIFT_2$1];
- ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1);
- return this.data[ix];
- }
- if (codePoint <= 0xffff) {
- // Lead Surrogate Code Point. A Separate index section is stored for
- // lead surrogate code units and code points.
- // The main index has the code unit data.
- // For this function, we need the code point data.
- // Note: this expression could be refactored for slightly improved efficiency, but
- // surrogate code points will be so rare in practice that it's not worth it.
- ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET$1 + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2$1)];
- ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1);
- return this.data[ix];
- }
- if (codePoint < this.highStart) {
- // Supplemental code point, use two-level lookup.
- ix = UTRIE2_INDEX_1_OFFSET$1 - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 + (codePoint >> UTRIE2_SHIFT_1$1);
- ix = this.index[ix];
- ix += (codePoint >> UTRIE2_SHIFT_2$1) & UTRIE2_INDEX_2_MASK$1;
- ix = this.index[ix];
- ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1);
- return this.data[ix];
- }
- if (codePoint <= 0x10ffff) {
- return this.data[this.highValueIndex];
- }
- }
- // Fall through. The code point is outside of the legal range of 0..0x10ffff.
- return this.errorValue;
- };
- return Trie;
- }());
-
- /*
- * base64-arraybuffer 1.0.2
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var chars$3 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$3 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$3 = 0; i$3 < chars$3.length; i$3++) {
- lookup$3[chars$3.charCodeAt(i$3)] = i$3;
- }
-
- var base64$1 = 'KwAAAAAAAAAACA4AUD0AADAgAAACAAAAAAAIABAAGABAAEgAUABYAGAAaABgAGgAYgBqAF8AZwBgAGgAcQB5AHUAfQCFAI0AlQCdAKIAqgCyALoAYABoAGAAaABgAGgAwgDKAGAAaADGAM4A0wDbAOEA6QDxAPkAAQEJAQ8BFwF1AH0AHAEkASwBNAE6AUIBQQFJAVEBWQFhAWgBcAF4ATAAgAGGAY4BlQGXAZ8BpwGvAbUBvQHFAc0B0wHbAeMB6wHxAfkBAQIJAvEBEQIZAiECKQIxAjgCQAJGAk4CVgJeAmQCbAJ0AnwCgQKJApECmQKgAqgCsAK4ArwCxAIwAMwC0wLbAjAA4wLrAvMC+AIAAwcDDwMwABcDHQMlAy0DNQN1AD0DQQNJA0kDSQNRA1EDVwNZA1kDdQB1AGEDdQBpA20DdQN1AHsDdQCBA4kDkQN1AHUAmQOhA3UAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AKYDrgN1AHUAtgO+A8YDzgPWAxcD3gPjA+sD8wN1AHUA+wMDBAkEdQANBBUEHQQlBCoEFwMyBDgEYABABBcDSARQBFgEYARoBDAAcAQzAXgEgASIBJAEdQCXBHUAnwSnBK4EtgS6BMIEyAR1AHUAdQB1AHUAdQCVANAEYABgAGAAYABgAGAAYABgANgEYADcBOQEYADsBPQE/AQEBQwFFAUcBSQFLAU0BWQEPAVEBUsFUwVbBWAAYgVgAGoFcgV6BYIFigWRBWAAmQWfBaYFYABgAGAAYABgAKoFYACxBbAFuQW6BcEFwQXHBcEFwQXPBdMF2wXjBeoF8gX6BQIGCgYSBhoGIgYqBjIGOgZgAD4GRgZMBmAAUwZaBmAAYABgAGAAYABgAGAAYABgAGAAYABgAGIGYABpBnAGYABgAGAAYABgAGAAYABgAGAAYAB4Bn8GhQZgAGAAYAB1AHcDFQSLBmAAYABgAJMGdQA9A3UAmwajBqsGqwaVALMGuwbDBjAAywbSBtIG1QbSBtIG0gbSBtIG0gbdBuMG6wbzBvsGAwcLBxMHAwcbByMHJwcsBywHMQcsB9IGOAdAB0gHTgfSBkgHVgfSBtIG0gbSBtIG0gbSBtIG0gbSBiwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdgAGAALAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdbB2MHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHL
AcsB2kH0gZwB64EdQB1AHUAdQB1AHUAdQB1AHUHfQdgAIUHjQd1AHUAlQedB2AAYAClB6sHYACzB7YHvgfGB3UAzgfWBzMB3gfmB1EB7gf1B/0HlQENAQUIDQh1ABUIHQglCBcDLQg1CD0IRQhNCEEDUwh1AHUAdQBbCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIcAh3CHoIMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIgggwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAALAcsBywHLAcsBywHLAcsBywHLAcsB4oILAcsB44I0gaWCJ4Ipgh1AHUAqgiyCHUAdQB1AHUAdQB1AHUAdQB1AHUAtwh8AXUAvwh1AMUIyQjRCNkI4AjoCHUAdQB1AO4I9gj+CAYJDgkTCS0HGwkjCYIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiAAIAAAAFAAYABgAGIAXwBgAHEAdQBFAJUAogCyAKAAYABgAEIA4ABGANMA4QDxAMEBDwE1AFwBLAE6AQEBUQF4QkhCmEKoQrhCgAHIQsAB0MLAAcABwAHAAeDC6ABoAHDCwMMAAcABwAHAAdDDGMMAAcAB6MM4wwjDWMNow3jDaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAEjDqABWw6bDqABpg6gAaABoAHcDvwOPA+gAaABfA/8DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DpcPAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcAB9cPKwkyCToJMAB1AHUAdQBCCUoJTQl1AFUJXAljCWcJawkwADAAMAAwAHMJdQB2CX4JdQCECYoJjgmWCXUAngkwAGAAYABxAHUApgn3A64JtAl1ALkJdQDACTAAMAAwADAAdQB1AHUAdQB1AHUAdQB1AHUAowYNBMUIMAAwADAAMADICcsJ0wnZCRUE4QkwAOkJ8An4CTAAMAB1AAAKvwh1AAgKDwoXCh8KdQAwACcKLgp1ADYKqAmICT4KRgowADAAdQB1AE4KMAB1AFYKdQBeCnUAZQowADAAMAAwADAAMAAwADAAMAAVBHUAbQow
ADAAdQC5CXUKMAAwAHwBxAijBogEMgF9CoQKiASMCpQKmgqIBKIKqgquCogEDQG2Cr4KxgrLCjAAMADTCtsKCgHjCusK8Qr5CgELMAAwADAAMAB1AIsECQsRC3UANAEZCzAAMAAwADAAMAB1ACELKQswAHUANAExCzkLdQBBC0kLMABRC1kLMAAwADAAMAAwADAAdQBhCzAAMAAwAGAAYABpC3ELdwt/CzAAMACHC4sLkwubC58Lpwt1AK4Ltgt1APsDMAAwADAAMAAwADAAMAAwAL4LwwvLC9IL1wvdCzAAMADlC+kL8Qv5C/8LSQswADAAMAAwADAAMAAwADAAMAAHDDAAMAAwADAAMAAODBYMHgx1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1ACYMMAAwADAAdQB1AHUALgx1AHUAdQB1AHUAdQA2DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AD4MdQBGDHUAdQB1AHUAdQB1AEkMdQB1AHUAdQB1AFAMMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQBYDHUAdQB1AF8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUA+wMVBGcMMAAwAHwBbwx1AHcMfwyHDI8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAYABgAJcMMAAwADAAdQB1AJ8MlQClDDAAMACtDCwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB7UMLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AA0EMAC9DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAsBywHLAcsBywHLAcsBywHLQcwAMEMyAwsBywHLAcsBywHLAcsBywHLAcsBywHzAwwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1ANQM2QzhDDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMABgAGAAYABgAGAAYABgAOkMYADxDGAA+AwADQYNYABhCWAAYAAODTAAMAAwADAAFg1gAGAAHg37AzAAMAAwADAAYABgACYNYAAsDTQNPA1gAEMNPg1LDWAAYABgAGAAYABgAGAAYABgAGAAUg1aDYsGVglhDV0NcQBnDW0NdQ15DWAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAlQCBDZUAiA2PDZcNMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAnw2nDTAAMAAwADAAMAAwAHUArw23DTAAMAAwADAAMAAwADAAMAAwADAAMAB1AL8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQDHDTAAYABgAM8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA1w11ANwNMAAwAD0B5A0wADAAMAAwADAAMADsDfQN/A0EDgwOFA4wABsOMAAwADAAMAAwADAAMAAwANIG0gbSBtIG0gbSBtIG0gYjDigOwQUuDsEFMw7SBjoO0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGQg5KDlIOVg7SBtIGXg5lDm0OdQ7SBtIGfQ6EDooOjQ6UDtIGmg6hDtIG0gaoDqwO0ga0DrwO0gZgAGAAYADEDmAAYAAkBtIGzA5gANIOYADaDokO0gbSBt8O5w7SBu8O0gb1DvwO0gZgAGAAxA7SBtIG0gbSBtIGYABgAGAAYAAED2AAsAUMD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHJA8sBywHLAcsBywHLAccDywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywPLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAc0D9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsByw
HLAcsBywHLAcsBywHLAcsBywHPA/SBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gYUD0QPlQCVAJUAMAAwADAAMACVAJUAlQCVAJUAlQCVAEwPMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA//8EAAQABAAEAAQABAAEAAQABAANAAMAAQABAAIABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQACgATABcAHgAbABoAHgAXABYAEgAeABsAGAAPABgAHABLAEsASwBLAEsASwBLAEsASwBLABgAGAAeAB4AHgATAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABYAGwASAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWAA0AEQAeAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAFAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJABYAGgAbABsAGwAeAB0AHQAeAE8AFwAeAA0AHgAeABoAGwBPAE8ADgBQAB0AHQAdAE8ATwAXAE8ATwBPABYAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAFAATwBAAE8ATwBPAEAATwBQAFAATwBQAB4AHgAeAB4AHgAeAB0AHQAdAB0AHgAdAB4ADgBQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgBQAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAkACQAJAAkACQAJAAkABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAFAAHgAeAB4AKwArAFAAUABQAFAAGABQACsAKwArACsAHgAeAFAAHgBQAFAAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUAAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAYAA0AKwArAB4AHgAbACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAB4ABAAEAB4ABAAEABMABAArACsAKwArACsAKwArACsAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAKwArACsAKwBWAFYAVgBWAB4AHgArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AGgAaABoAGAAYAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQAEwAEACsAEwATAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABLAEsASwBLAEsASwBLAEsASwBLABoAGQAZAB4AUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABMAUAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABABQAFAABAAEAB4ABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASw
BQAFAAUAAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAFAABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQAUABQAB4AHgAYABMAUAArACsABAAbABsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAFAABAAEAAQABAAEAFAABAAEAAQAUAAEAAQABAAEAAQAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArACsAHgArAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAUAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEAA0ADQBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUAArACsAKwBQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABABQACsAKwArACsAKwArACsAKwAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUAAaABoAUABQAFAAUABQAEwAHgAbAFAAHgAEACsAKwAEAAQABAArAFAAUABQAFAAUABQACsAKwArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQACsAUABQACsAKwAEACsABAAEAAQABAAEACsAKwArACsABAAEACsAKwAEAAQABAArACsAKwAEACsAKwArACsAKwArACsAUABQAFAAUAArAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLAAQABABQAFAAUAAEAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAArACsAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AGwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAKwArACsAKwArAAQABAAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAAQAUAArAFAAUABQAFAAUABQACsAKwArAFAAUABQACsAUABQAFAAUAArACsAKwBQAFAAKwBQACsAUABQACsAKwArAFAAUAArACsAKwBQAFAAUAArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArAAQABAAEAAQABAArACsAKwAEAAQABAArAAQABAAEAAQAKwArAFAAKwArACsAKwArACsABAArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAHgAeAB4AHgAeAB4AGwAeACsAKwArACsAKwAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAUABQAFAAKwArACsAKwArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwAOAFAAUABQAFAAUABQAFAAHgBQAAQABAAEAA4AUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAKwArAAQAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAKwArACsAKwArACsAUAArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLA
EsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAFAABAAEAAQABAAEAAQABAArAAQABAAEACsABAAEAAQABABQAB4AKwArACsAKwBQAFAAUAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQABoAUABQAFAAUABQAFAAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQACsAUAArACsAUABQAFAAUABQAFAAUAArACsAKwAEACsAKwArACsABAAEAAQABAAEAAQAKwAEACsABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArAAQABAAeACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAXAAqACoAKgAqACoAKgAqACsAKwArACsAGwBcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAeAEsASwBLAEsASwBLAEsASwBLAEsADQANACsAKwArACsAKwBcAFwAKwBcACsAXABcAFwAXABcACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAXAArAFwAXABcAFwAXABcAFwAXABcAFwAKgBcAFwAKgAqACoAKgAqACoAKgAqACoAXAArACsAXABcAFwAXABcACsAXAArACoAKgAqACoAKgAqACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwBcAFwAXABcAFAADgAOAA4ADgAeAA4ADgAJAA4ADgANAAkAEwATABMAEwATAAkAHgATAB4AHgAeAAQABAAeAB4AHgAeAB4AHgBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQAFAADQAEAB4ABAAeAAQAFgARABYAEQAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAAQABAAEAAQADQAEAAQAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAA0ADQAeAB4AHgAeAB4AHgAEAB4AHgAeAB4AHgAeACsAHgAeAA4ADgANAA4AHgAeAB4AHgAeAAkACQArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgBcAEsASwBLAEsASwBLAEsASwBLAEsADQANAB4AHgAeAB4AXABcAFwAXABcAFwAKgAqACoAKgBcAFwAXABcACoAKgAqAFwAKgAqACoAXABcACoAKgAqACoAKgAqACoAXABcAFwAKgAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqAFwAKgBLAEsASwBLAEsASwBLAEsASwBLACoAKgAqACoAKgAqAFAAUABQAFAAUABQACsAUAArACsAKwArACsAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAKwBQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsABAAEAAQAHgANAB4AHgAeAB4AHgAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUAArACsADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWABEAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQANAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAAQABAAEACsAKwArACsAKwArACsA
KwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAANAA0AKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUAArAAQABAArACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqAA0ADQAVAFwADQAeAA0AGwBcACoAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwAeAB4AEwATAA0ADQAOAB4AEwATAB4ABAAEAAQACQArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAHgArACsAKwATABMASwBLAEsASwBLAEsASwBLAEsASwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAXABcAFwAXABcACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAXAArACsAKwAqACoAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsAHgAeAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKwAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKwArAAQASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACoAKgAqACoAKgAqACoAXAAqACoAKgAqACoAKgArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABABQAFAAUABQAFAAUABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwANAA0AHgANAA0ADQANAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwAeAB4AHgAeAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArAA0ADQANAA0ADQBLAEsASwBLAEsASwBLAEsASwBLACsAKwArAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUAAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAAQAUABQAFAAUABQAFAABABQAFAABAAEAAQAUAArACsAKwArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQACsAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAFAAUABQACsAHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQACsAKwAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQACsAHgAeAB4AHgAeAB4AHgAOAB4AKwANAA0ADQANAA0ADQANAAkADQANAA0ACAAEAAsABAAEAA0ACQANAA0ADAAdAB0AHgAXABcAFgAXABcAFwAWABcAHQAdAB4AHgAUABQAFAANAAEAAQAEAAQABAAEAAQACQAaABoAGgAaABoAGgAaABoAHgA
XABcAHQAVABUAHgAeAB4AHgAeAB4AGAAWABEAFQAVABUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ADQAeAA0ADQANAA0AHgANAA0ADQAHAB4AHgAeAB4AKwAEAAQABAAEAAQABAAEAAQABAAEAFAAUAArACsATwBQAFAAUABQAFAAHgAeAB4AFgARAE8AUABPAE8ATwBPAFAAUABQAFAAUAAeAB4AHgAWABEAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArABsAGwAbABsAGwAbABsAGgAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGgAbABsAGwAbABoAGwAbABoAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAHgAeAFAAGgAeAB0AHgBQAB4AGgAeAB4AHgAeAB4AHgAeAB4AHgBPAB4AUAAbAB4AHgBQAFAAUABQAFAAHgAeAB4AHQAdAB4AUAAeAFAAHgBQAB4AUABPAFAAUAAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgBQAFAAUABQAE8ATwBQAFAAUABQAFAATwBQAFAATwBQAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAUABQAFAATwBPAE8ATwBPAE8ATwBPAE8ATwBQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABPAB4AHgArACsAKwArAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHQAdAB4AHgAeAB0AHQAeAB4AHQAeAB4AHgAdAB4AHQAbABsAHgAdAB4AHgAeAB4AHQAeAB4AHQAdAB0AHQAeAB4AHQAeAB0AHgAdAB0AHQAdAB0AHQAeAB0AHgAeAB4AHgAeAB0AHQAdAB0AHgAeAB4AHgAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHgAeAB0AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAeAB0AHQAdAB0AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAdAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAWABEAHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAWABEAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AHQAdAB0AHgAeAB0AHgAeAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlAB4AHQAdAB4AHgAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AJQAlAB0AHQAlAB4AJQAlACUAIAAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAdAB0AHQAeAB0AJQAdAB0AHgAdAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAdAB0AHQAdACUAHgAlACUAJQAdACUAJQAdAB0AHQAlACUAHQAdACUAHQAdACUAJQAlAB4AHQAeAB4AHgAeAB0AHQAlAB0AHQAdAB0AHQAdACUAJQAlACUAJQAdACUAJQAgACUAHQAdACUAJQAlACUAJQAlACUAJQAeAB4AHgAlACUAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AFwAXABcAFwAXABcAHgATABMAJQAeAB4AHgAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARABYAEQAWABEAFgARAB4AHgAeAB4AHgAeAB
4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAEAAQABAAeAB4AKwArACsAKwArABMADQANAA0AUAATAA0AUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUAANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAA0ADQANAA0ADQANAA0ADQAeAA0AFgANAB4AHgAXABcAHgAeABcAFwAWABEAFgARABYAEQAWABEADQANAA0ADQATAFAADQANAB4ADQANAB4AHgAeAB4AHgAMAAwADQANAA0AHgANAA0AFgANAA0ADQANAA0ADQANAA0AHgANAB4ADQANAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArAA0AEQARACUAJQBHAFcAVwAWABEAFgARABYAEQAWABEAFgARACUAJQAWABEAFgARABYAEQAWABEAFQAWABEAEQAlAFcAVwBXAFcAVwBXAFcAVwBXAAQABAAEAAQABAAEACUAVwBXAFcAVwA2ACUAJQBXAFcAVwBHAEcAJQAlACUAKwBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBRAFcAUQBXAFEAVwBXAFcAVwBXAFcAUQBXAFcAVwBXAFcAVwBRAFEAKwArAAQABAAVABUARwBHAFcAFQBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBRAFcAVwBXAFcAVwBXAFEAUQBXAFcAVwBXABUAUQBHAEcAVwArACsAKwArACsAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwAlACUAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACsAKwArACsAKwArACsAKwArACsAKwArAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBPAE8ATwBPAE8ATwBPAE8AJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAEcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADQATAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABLAEsASwBLAEsASwBLAEsASwBLAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAABAAEAAQABAAeAAQABAAEAAQABAAEAAQABAAEAAQAHgBQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAeAA0ADQANAA0ADQArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AH
gBQAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAAQAUABQAFAABABQAFAAUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAeAB4AHgAeAAQAKwArACsAUABQAFAAUABQAFAAHgAeABoAHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADgAOABMAEwArACsAKwArACsAKwArACsABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwANAA0ASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUAAeAB4AHgBQAA4AUABQAAQAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArAB4AWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYACsAKwArAAQAHgAeAB4AHgAeAB4ADQANAA0AHgAeAB4AHgArAFAASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArAB4AHgBcAFwAXABcAFwAKgBcAFwAXABcAFwAXABcAFwAXABcAEsASwBLAEsASwBLAEsASwBLAEsAXABcAFwAXABcACsAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAFAAUABQAAQAUABQAFAAUABQAFAAUABQAAQABAArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAHgANAA0ADQBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAXAAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAKgAqACoAXABcACoAKgBcAFwAXABcAFwAKgAqAFwAKgBcACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcACoAKgBQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAA0ADQBQAFAAUAAEAAQAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQADQAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAVABVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBUAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVACsAKwArACsAKwArACsAKwArACsAKwArAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAKwArACsAKwBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAKwArACsAKwAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAKwArACsAKwArAFYABABWAFYAVgBWAFYAVgBWAFYAVgBWAB4AVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgArAFYAVgBWAFYAVgArAFYAKwBWAFYAKwBWAFYAKwBWAFYAVgBWAFYAVgBWAFYAVgBWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAEQAWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAaAB4AKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAGAARABEAGAAYABMAEwAWABEAFAArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACUAJQAlACUAJQAWABEAFgARABYAEQAWABEAFgARABYAEQAlACUAFgARACUAJQAlACUAJQAlACUAEQAlABEAKwAVABUAEwATACUAFgARABYAEQAWABEAJQAlACUAJQAlACUAJQAlACsAJQAbABoAJQArACsAKwArAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAcAKwATACUAJQAbABoAJQAlABYAEQAlACUAEQAlABEAJQBX
AFcAVwBXAFcAVwBXAFcAVwBXABUAFQAlACUAJQATACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXABYAJQARACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAWACUAEQAlABYAEQARABYAEQARABUAVwBRAFEAUQBRAFEAUQBRAFEAUQBRAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAEcARwArACsAVwBXAFcAVwBXAFcAKwArAFcAVwBXAFcAVwBXACsAKwBXAFcAVwBXAFcAVwArACsAVwBXAFcAKwArACsAGgAbACUAJQAlABsAGwArAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAAQAB0AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsADQANAA0AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAA0AUABQAFAAUAArACsAKwArAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwArAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwBQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAUABQAFAAUABQAAQABAAEACsABAAEACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAKwBQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAA0ADQANAA0ADQANAA0ADQAeACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAArACsAKwArAFAAUABQAFAAUAANAA0ADQANAA0ADQAUACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsADQANAA0ADQANAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAB4AHgAeAB4AKwArACsAKwArACsAKwArACs
AKwArACsAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArAAQABAANACsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAB4AHgAeAB4AHgArACsAKwArACsAKwAEAAQABAAEAAQABAAEAA0ADQAeAB4AHgAeAB4AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsASwBLAEsASwBLAEsASwBLAEsASwANAA0ADQANAFAABAAEAFAAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAeAA4AUAArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAADQANAB4ADQAEAAQABAAEAB4ABAAEAEsASwBLAEsASwBLAEsASwBLAEsAUAAOAFAADQANAA0AKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAANAA0AHgANAA0AHgAEACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAA0AKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsABAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsABAAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAUAArACsAKwArACsAKwAEACsAKwArACsAKwBQAFAAUABQAFAABAAEACsAKwAEAAQABAAEAAQABAAEACsAKwArAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAAQABABQAFAAUABQAA0ADQANAA0AHgBLAEsASwBLAEsASwBLAEsASwBLAA0ADQArAB4ABABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUAAeAFAAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABAAEAAQADgANAA0AEwATAB4AHgAeAA0ADQANAA0ADQANAA0ADQANAA0ADQANAA0ADQANAFAAUABQAFAABAAEACsAKwAEAA0ADQAeAFAAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKwArACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBcAFwADQANAA0AKgBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAKwArAFAAKwArAFAAUABQAFAAUABQAFAAUAArAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQAKwAEAAQAKwArAAQABAAEAAQAUAAEAFAABAAEAA0ADQANACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABABQAA4AUAAEACsAKw
ArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAFAABAAEAAQABAAOAB4ADQANAA0ADQAOAB4ABAArACsAKwArACsAKwArACsAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAA0ADQANAFAADgAOAA4ADQANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAAQABAAEAFAADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAOABMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAArACsAKwAEACsABAAEACsABAAEAAQABAAEAAQABABQAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAaABoAGgAaAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAASABIAEgAQwBDAEMAUABQAFAAUABDAFAAUABQAEgAQwBIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAASABDAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAJAAkACQAJAAkACQAJABYAEQArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwANAA0AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAANACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAA0ADQANAB4AHgAeAB4AHgAeAFAAUABQAFAADQAeACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAA0AHgAeACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAARwBHABUARwAJACsAKwArACsAKwArACsAKwArACsAKwAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUQBRAFEAKwArACsAKwArACsAKwArACsAKwArACsAKwBRAFEAUQBRACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAHgAEAAQADQAEAAQABAAEA
CsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQABAAEAAQABAAeAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQAHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAKwArAFAAKwArAFAAUAArACsAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUAArAFAAUABQAFAAUABQAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAHgAeAFAAUABQAFAAUAArAFAAKwArACsAUABQAFAAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeACsAKwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4ABAAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAHgAeAA0ADQANAA0AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArAAQABAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwBQAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArABsAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAB4AHgAeAB4ABAAEAAQABAAEAAQABABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArABYAFgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAGgBQAFAAUAAaAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUAArACsAKwArACsAKwBQACsAKwArACsAUAArAFAAKwBQACsAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUAArAFAAKwBQACsAUAArAFAAUAArAFAAKwArAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAKwBQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8AJQAlACUAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0A
HQAdAB0AHQAdAB4AHgAeACUAJQAlAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAlACUAJQAlACUAHgAlACUAJQAlACUAIAAgACAAJQAlACAAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACEAIQAhACEAIQAlACUAIAAgACUAJQAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAIAAlACUAJQAlACAAIAAgACUAIAAgACAAJQAlACUAJQAlACUAJQAgACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAlAB4AJQAeACUAJQAlACUAJQAgACUAJQAlACUAHgAlAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACAAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABcAFwAXABUAFQAVAB4AHgAeAB4AJQAlACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAgACUAJQAgACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUAIAAlACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAIAAgACUAJQAgACAAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACAAIAAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAgACAAIAAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAA==';
-
- var LETTER_NUMBER_MODIFIER = 50;
- // Non-tailorable Line Breaking Classes
- var BK = 1; // Cause a line break (after)
- var CR$1 = 2; // Cause a line break (after), except between CR and LF
- var LF$1 = 3; // Cause a line break (after)
- var CM = 4; // Prohibit a line break between the character and the preceding character
- var NL = 5; // Cause a line break (after)
- var WJ = 7; // Prohibit line breaks before and after
- var ZW = 8; // Provide a break opportunity
- var GL = 9; // Prohibit line breaks before and after
- var SP = 10; // Enable indirect line breaks
- var ZWJ$1 = 11; // Prohibit line breaks within joiner sequences
- // Break Opportunities
- var B2 = 12; // Provide a line break opportunity before and after the character
- var BA = 13; // Generally provide a line break opportunity after the character
- var BB = 14; // Generally provide a line break opportunity before the character
- var HY = 15; // Provide a line break opportunity after the character, except in numeric context
- var CB = 16; // Provide a line break opportunity contingent on additional information
- // Characters Prohibiting Certain Breaks
- var CL = 17; // Prohibit line breaks before
- var CP = 18; // Prohibit line breaks before
- var EX = 19; // Prohibit line breaks before
- var IN = 20; // Allow only indirect line breaks between pairs
- var NS = 21; // Allow only indirect line breaks before
- var OP = 22; // Prohibit line breaks after
- var QU = 23; // Act like they are both opening and closing
- // Numeric Context
- var IS = 24; // Prevent breaks after any and before numeric
- var NU = 25; // Form numeric expressions for line breaking purposes
- var PO = 26; // Do not break following a numeric expression
- var PR = 27; // Do not break in front of a numeric expression
- var SY = 28; // Prevent a break before, and allow a break after
- // Other Characters
- var AI = 29; // Act like AL when the resolved EAW is N; otherwise, act as ID
- var AL = 30; // Are alphabetic characters or symbols that are used with alphabetic characters
- var CJ = 31; // Treat as NS or ID for strict or normal breaking.
- var EB = 32; // Do not break from following Emoji Modifier
- var EM = 33; // Do not break from preceding Emoji Base
- var H2 = 34; // Form Korean syllable blocks
- var H3 = 35; // Form Korean syllable blocks
- var HL = 36; // Do not break around a following hyphen; otherwise act as Alphabetic
- var ID = 37; // Break before or after; except in some numeric context
- var JL = 38; // Form Korean syllable blocks
- var JV = 39; // Form Korean syllable blocks
- var JT = 40; // Form Korean syllable blocks
- var RI$1 = 41; // Keep pairs together. For pairs, break before and after other classes
- var SA = 42; // Provide a line break opportunity contingent on additional, language-specific context analysis
- var XX = 43; // Have as yet unknown line breaking behavior or unassigned code positions
- var ea_OP = [0x2329, 0xff08];
- var BREAK_MANDATORY = '!';
- var BREAK_NOT_ALLOWED$1 = '×';
- var BREAK_ALLOWED$1 = '÷';
- var UnicodeTrie$1 = createTrieFromBase64$1(base64$1);
- var ALPHABETICS = [AL, HL];
- var HARD_LINE_BREAKS = [BK, CR$1, LF$1, NL];
- var SPACE$1 = [SP, ZW];
- var PREFIX_POSTFIX = [PR, PO];
- var LINE_BREAKS = HARD_LINE_BREAKS.concat(SPACE$1);
- var KOREAN_SYLLABLE_BLOCK = [JL, JV, JT, H2, H3];
- var HYPHEN = [HY, BA];
- var codePointsToCharacterClasses = function (codePoints, lineBreak) {
- if (lineBreak === void 0) { lineBreak = 'strict'; }
- var types = [];
- var indices = [];
- var categories = [];
- codePoints.forEach(function (codePoint, index) {
- var classType = UnicodeTrie$1.get(codePoint);
- if (classType > LETTER_NUMBER_MODIFIER) {
- categories.push(true);
- classType -= LETTER_NUMBER_MODIFIER;
- }
- else {
- categories.push(false);
- }
- if (['normal', 'auto', 'loose'].indexOf(lineBreak) !== -1) {
- // ‐ U+2010, – U+2013, 〜 U+301C, ゠ U+30A0
- if ([0x2010, 0x2013, 0x301c, 0x30a0].indexOf(codePoint) !== -1) {
- indices.push(index);
- return types.push(CB);
- }
- }
- if (classType === CM || classType === ZWJ$1) {
- // LB10 Treat any remaining combining mark or ZWJ as AL.
- if (index === 0) {
- indices.push(index);
- return types.push(AL);
- }
- // LB9 Do not break a combining character sequence; treat it as if it has the line breaking class of
- // the base character in all of the following rules. Treat ZWJ as if it were CM.
- var prev = types[index - 1];
- if (LINE_BREAKS.indexOf(prev) === -1) {
- indices.push(indices[index - 1]);
- return types.push(prev);
- }
- indices.push(index);
- return types.push(AL);
- }
- indices.push(index);
- if (classType === CJ) {
- return types.push(lineBreak === 'strict' ? NS : ID);
- }
- if (classType === SA) {
- return types.push(AL);
- }
- if (classType === AI) {
- return types.push(AL);
- }
- // For supplementary characters, a useful default is to treat characters in the range 10000..1FFFD as AL
- // and characters in the ranges 20000..2FFFD and 30000..3FFFD as ID, until the implementation can be revised
- // to take into account the actual line breaking properties for these characters.
- if (classType === XX) {
- if ((codePoint >= 0x20000 && codePoint <= 0x2fffd) || (codePoint >= 0x30000 && codePoint <= 0x3fffd)) {
- return types.push(ID);
- }
- else {
- return types.push(AL);
- }
- }
- types.push(classType);
- });
- return [indices, types, categories];
- };
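The classifier above resolves each code point to its UAX#14 class through the base64-encoded trie, folding the context-dependent classes (CM/ZWJ, CJ, SA, AI, XX) into concrete ones. A minimal sketch of exercising it, assuming toCodePoints$1 defined earlier in this bundle is in scope (illustrative sketch, not part of the removed source):

    var sampleCps = toCodePoints$1('ab 12');
    var sampleResolved = codePointsToCharacterClasses(sampleCps, 'strict');
    var sampleClasses = sampleResolved[1]; // expected: [AL, AL, SP, NU, NU] for 'ab 12'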
- var isAdjacentWithSpaceIgnored = function (a, b, currentIndex, classTypes) {
- var current = classTypes[currentIndex];
- if (Array.isArray(a) ? a.indexOf(current) !== -1 : a === current) {
- var i = currentIndex;
- while (i <= classTypes.length) {
- i++;
- var next = classTypes[i];
- if (next === b) {
- return true;
- }
- if (next !== SP) {
- break;
- }
- }
- }
- if (current === SP) {
- var i = currentIndex;
- while (i > 0) {
- i--;
- var prev = classTypes[i];
- if (Array.isArray(a) ? a.indexOf(prev) !== -1 : a === prev) {
- var n = currentIndex;
- while (n <= classTypes.length) {
- n++;
- var next = classTypes[n];
- if (next === b) {
- return true;
- }
- if (next !== SP) {
- break;
- }
- }
- }
- if (prev !== SP) {
- break;
- }
- }
- }
- return false;
- };
- var previousNonSpaceClassType = function (currentIndex, classTypes) {
- var i = currentIndex;
- while (i >= 0) {
- var type = classTypes[i];
- if (type === SP) {
- i--;
- }
- else {
- return type;
- }
- }
- return 0;
- };
- var _lineBreakAtIndex = function (codePoints, classTypes, indicies, index, forbiddenBreaks) {
- if (indicies[index] === 0) {
- return BREAK_NOT_ALLOWED$1;
- }
- var currentIndex = index - 1;
- if (Array.isArray(forbiddenBreaks) && forbiddenBreaks[currentIndex] === true) {
- return BREAK_NOT_ALLOWED$1;
- }
- var beforeIndex = currentIndex - 1;
- var afterIndex = currentIndex + 1;
- var current = classTypes[currentIndex];
- // LB4 Always break after hard line breaks.
- // LB5 Treat CR followed by LF, as well as CR, LF, and NL as hard line breaks.
- var before = beforeIndex >= 0 ? classTypes[beforeIndex] : 0;
- var next = classTypes[afterIndex];
- if (current === CR$1 && next === LF$1) {
- return BREAK_NOT_ALLOWED$1;
- }
- if (HARD_LINE_BREAKS.indexOf(current) !== -1) {
- return BREAK_MANDATORY;
- }
- // LB6 Do not break before hard line breaks.
- if (HARD_LINE_BREAKS.indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB7 Do not break before spaces or zero width space.
- if (SPACE$1.indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB8 Break before any character following a zero-width space, even if one or more spaces intervene.
- if (previousNonSpaceClassType(currentIndex, classTypes) === ZW) {
- return BREAK_ALLOWED$1;
- }
- // LB8a Do not break after a zero width joiner.
- if (UnicodeTrie$1.get(codePoints[currentIndex]) === ZWJ$1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // zwj emojis
- if ((current === EB || current === EM) && UnicodeTrie$1.get(codePoints[afterIndex]) === ZWJ$1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB11 Do not break before or after Word joiner and related characters.
- if (current === WJ || next === WJ) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB12 Do not break after NBSP and related characters.
- if (current === GL) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB12a Do not break before NBSP and related characters, except after spaces and hyphens.
- if ([SP, BA, HY].indexOf(current) === -1 && next === GL) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB13 Do not break before ‘]’ or ‘!’ or ‘;’ or ‘/’, even after spaces.
- if ([CL, CP, EX, IS, SY].indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB14 Do not break after ‘[’, even after spaces.
- if (previousNonSpaceClassType(currentIndex, classTypes) === OP) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB15 Do not break within ‘”[’, even with intervening spaces.
- if (isAdjacentWithSpaceIgnored(QU, OP, currentIndex, classTypes)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB16 Do not break between closing punctuation and a nonstarter (lb=NS), even with intervening spaces.
- if (isAdjacentWithSpaceIgnored([CL, CP], NS, currentIndex, classTypes)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB17 Do not break within ‘——’, even with intervening spaces.
- if (isAdjacentWithSpaceIgnored(B2, B2, currentIndex, classTypes)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB18 Break after spaces.
- if (current === SP) {
- return BREAK_ALLOWED$1;
- }
- // LB19 Do not break before or after quotation marks, such as ‘ ” ’.
- if (current === QU || next === QU) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB20 Break before and after unresolved CB.
- if (next === CB || current === CB) {
- return BREAK_ALLOWED$1;
- }
- // LB21 Do not break before hyphen-minus, other hyphens, fixed-width spaces, small kana, and other non-starters, or after acute accents.
- if ([BA, HY, NS].indexOf(next) !== -1 || current === BB) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB21a Don't break after Hebrew + Hyphen.
- if (before === HL && HYPHEN.indexOf(current) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB21b Don’t break between Solidus and Hebrew letters.
- if (current === SY && next === HL) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB22 Do not break before ellipsis.
- if (next === IN) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB23 Do not break between digits and letters.
- if ((ALPHABETICS.indexOf(next) !== -1 && current === NU) || (ALPHABETICS.indexOf(current) !== -1 && next === NU)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB23a Do not break between numeric prefixes and ideographs, or between ideographs and numeric postfixes.
- if ((current === PR && [ID, EB, EM].indexOf(next) !== -1) ||
- ([ID, EB, EM].indexOf(current) !== -1 && next === PO)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB24 Do not break between numeric prefix/postfix and letters, or between letters and prefix/postfix.
- if ((ALPHABETICS.indexOf(current) !== -1 && PREFIX_POSTFIX.indexOf(next) !== -1) ||
- (PREFIX_POSTFIX.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB25 Do not break between the following pairs of classes relevant to numbers:
- if (
- // (PR | PO) × ( OP | HY )? NU
- ([PR, PO].indexOf(current) !== -1 &&
- (next === NU || ([OP, HY].indexOf(next) !== -1 && classTypes[afterIndex + 1] === NU))) ||
- // ( OP | HY ) × NU
- ([OP, HY].indexOf(current) !== -1 && next === NU) ||
- // NU × (NU | SY | IS)
- (current === NU && [NU, SY, IS].indexOf(next) !== -1)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // NU (NU | SY | IS)* × (NU | SY | IS | CL | CP)
- if ([NU, SY, IS, CL, CP].indexOf(next) !== -1) {
- var prevIndex = currentIndex;
- while (prevIndex >= 0) {
- var type = classTypes[prevIndex];
- if (type === NU) {
- return BREAK_NOT_ALLOWED$1;
- }
- else if ([SY, IS].indexOf(type) !== -1) {
- prevIndex--;
- }
- else {
- break;
- }
- }
- }
- // NU (NU | SY | IS)* (CL | CP)? × (PO | PR)
- if ([PR, PO].indexOf(next) !== -1) {
- var prevIndex = [CL, CP].indexOf(current) !== -1 ? beforeIndex : currentIndex;
- while (prevIndex >= 0) {
- var type = classTypes[prevIndex];
- if (type === NU) {
- return BREAK_NOT_ALLOWED$1;
- }
- else if ([SY, IS].indexOf(type) !== -1) {
- prevIndex--;
- }
- else {
- break;
- }
- }
- }
- // LB26 Do not break a Korean syllable.
- if ((JL === current && [JL, JV, H2, H3].indexOf(next) !== -1) ||
- ([JV, H2].indexOf(current) !== -1 && [JV, JT].indexOf(next) !== -1) ||
- ([JT, H3].indexOf(current) !== -1 && next === JT)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB27 Treat a Korean Syllable Block the same as ID.
- if ((KOREAN_SYLLABLE_BLOCK.indexOf(current) !== -1 && [IN, PO].indexOf(next) !== -1) ||
- (KOREAN_SYLLABLE_BLOCK.indexOf(next) !== -1 && current === PR)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB28 Do not break between alphabetics (“at”).
- if (ALPHABETICS.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB29 Do not break between numeric punctuation and alphabetics (“e.g.”).
- if (current === IS && ALPHABETICS.indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB30 Do not break between letters, numbers, or ordinary symbols and opening or closing parentheses.
- if ((ALPHABETICS.concat(NU).indexOf(current) !== -1 &&
- next === OP &&
- ea_OP.indexOf(codePoints[afterIndex]) === -1) ||
- (ALPHABETICS.concat(NU).indexOf(next) !== -1 && current === CP)) {
- return BREAK_NOT_ALLOWED$1;
- }
- // LB30a Break between two regional indicator symbols if and only if there are an even number of regional
- // indicators preceding the position of the break.
- if (current === RI$1 && next === RI$1) {
- var i = indicies[currentIndex];
- var count = 1;
- while (i > 0) {
- i--;
- if (classTypes[i] === RI$1) {
- count++;
- }
- else {
- break;
- }
- }
- if (count % 2 !== 0) {
- return BREAK_NOT_ALLOWED$1;
- }
- }
- // LB30b Do not break between an emoji base and an emoji modifier.
- if (current === EB && next === EM) {
- return BREAK_NOT_ALLOWED$1;
- }
- return BREAK_ALLOWED$1;
- };
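_lineBreakAtIndex evaluates a single candidate position by walking the LB rules above in order, returning '!' for a mandatory break, '×' where a break is prohibited, and '÷' where one is allowed. A minimal sketch of a direct call, again assuming toCodePoints$1 from this bundle is in scope (illustrative sketch only):

    var lbCps = toCodePoints$1('a b');
    var lbResolved = codePointsToCharacterClasses(lbCps, 'strict'); // classes: [AL, SP, AL]
    // Position 2 sits between the space and 'b'; LB18 (break after spaces) applies, so '÷' is returned.
    var lbResult = _lineBreakAtIndex(lbCps, lbResolved[1], lbResolved[0], 2, undefined);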
- var cssFormattedClasses = function (codePoints, options) {
- if (!options) {
- options = { lineBreak: 'normal', wordBreak: 'normal' };
- }
- var _a = codePointsToCharacterClasses(codePoints, options.lineBreak), indicies = _a[0], classTypes = _a[1], isLetterNumber = _a[2];
- if (options.wordBreak === 'break-all' || options.wordBreak === 'break-word') {
- classTypes = classTypes.map(function (type) { return ([NU, AL, SA].indexOf(type) !== -1 ? ID : type); });
- }
- var forbiddenBreakpoints = options.wordBreak === 'keep-all'
- ? isLetterNumber.map(function (letterNumber, i) {
- return letterNumber && codePoints[i] >= 0x4e00 && codePoints[i] <= 0x9fff;
- })
- : undefined;
- return [indicies, classTypes, forbiddenBreakpoints];
- };
- var Break = /** @class */ (function () {
- function Break(codePoints, lineBreak, start, end) {
- this.codePoints = codePoints;
- this.required = lineBreak === BREAK_MANDATORY;
- this.start = start;
- this.end = end;
- }
- Break.prototype.slice = function () {
- return fromCodePoint$1.apply(void 0, this.codePoints.slice(this.start, this.end));
- };
- return Break;
- }());
- var LineBreaker = function (str, options) {
- var codePoints = toCodePoints$1(str);
- var _a = cssFormattedClasses(codePoints, options), indicies = _a[0], classTypes = _a[1], forbiddenBreakpoints = _a[2];
- var length = codePoints.length;
- var lastEnd = 0;
- var nextIndex = 0;
- return {
- next: function () {
- if (nextIndex >= length) {
- return { done: true, value: null };
- }
- var lineBreak = BREAK_NOT_ALLOWED$1;
- while (nextIndex < length &&
- (lineBreak = _lineBreakAtIndex(codePoints, classTypes, indicies, ++nextIndex, forbiddenBreakpoints)) ===
- BREAK_NOT_ALLOWED$1) { }
- if (lineBreak !== BREAK_NOT_ALLOWED$1 || nextIndex === length) {
- var value = new Break(codePoints, lineBreak, lastEnd, nextIndex);
- lastEnd = nextIndex;
- return { value: value, done: false };
- }
- return { done: true, value: null };
- },
- };
- };
-
- // https://www.w3.org/TR/css-syntax-3
- var FLAG_UNRESTRICTED = 1 << 0;
- var FLAG_ID = 1 << 1;
- var FLAG_INTEGER = 1 << 2;
- var FLAG_NUMBER = 1 << 3;
- var LINE_FEED = 0x000a;
- var SOLIDUS = 0x002f;
- var REVERSE_SOLIDUS = 0x005c;
- var CHARACTER_TABULATION = 0x0009;
- var SPACE = 0x0020;
- var QUOTATION_MARK = 0x0022;
- var EQUALS_SIGN = 0x003d;
- var NUMBER_SIGN = 0x0023;
- var DOLLAR_SIGN = 0x0024;
- var PERCENTAGE_SIGN = 0x0025;
- var APOSTROPHE = 0x0027;
- var LEFT_PARENTHESIS = 0x0028;
- var RIGHT_PARENTHESIS = 0x0029;
- var LOW_LINE = 0x005f;
- var HYPHEN_MINUS = 0x002d;
- var EXCLAMATION_MARK = 0x0021;
- var LESS_THAN_SIGN = 0x003c;
- var GREATER_THAN_SIGN = 0x003e;
- var COMMERCIAL_AT = 0x0040;
- var LEFT_SQUARE_BRACKET = 0x005b;
- var RIGHT_SQUARE_BRACKET = 0x005d;
-    var CIRCUMFLEX_ACCENT = 0x005e;
- var LEFT_CURLY_BRACKET = 0x007b;
- var QUESTION_MARK = 0x003f;
- var RIGHT_CURLY_BRACKET = 0x007d;
- var VERTICAL_LINE = 0x007c;
- var TILDE = 0x007e;
- var CONTROL = 0x0080;
- var REPLACEMENT_CHARACTER = 0xfffd;
- var ASTERISK = 0x002a;
- var PLUS_SIGN = 0x002b;
- var COMMA = 0x002c;
- var COLON = 0x003a;
- var SEMICOLON = 0x003b;
- var FULL_STOP = 0x002e;
- var NULL = 0x0000;
- var BACKSPACE = 0x0008;
- var LINE_TABULATION = 0x000b;
- var SHIFT_OUT = 0x000e;
- var INFORMATION_SEPARATOR_ONE = 0x001f;
- var DELETE = 0x007f;
- var EOF = -1;
- var ZERO = 0x0030;
- var a = 0x0061;
- var e = 0x0065;
- var f = 0x0066;
- var u = 0x0075;
- var z = 0x007a;
- var A = 0x0041;
- var E = 0x0045;
- var F = 0x0046;
- var U = 0x0055;
- var Z = 0x005a;
- var isDigit = function (codePoint) { return codePoint >= ZERO && codePoint <= 0x0039; };
- var isSurrogateCodePoint = function (codePoint) { return codePoint >= 0xd800 && codePoint <= 0xdfff; };
- var isHex = function (codePoint) {
- return isDigit(codePoint) || (codePoint >= A && codePoint <= F) || (codePoint >= a && codePoint <= f);
- };
- var isLowerCaseLetter = function (codePoint) { return codePoint >= a && codePoint <= z; };
- var isUpperCaseLetter = function (codePoint) { return codePoint >= A && codePoint <= Z; };
- var isLetter = function (codePoint) { return isLowerCaseLetter(codePoint) || isUpperCaseLetter(codePoint); };
- var isNonASCIICodePoint = function (codePoint) { return codePoint >= CONTROL; };
- var isWhiteSpace = function (codePoint) {
- return codePoint === LINE_FEED || codePoint === CHARACTER_TABULATION || codePoint === SPACE;
- };
- var isNameStartCodePoint = function (codePoint) {
- return isLetter(codePoint) || isNonASCIICodePoint(codePoint) || codePoint === LOW_LINE;
- };
- var isNameCodePoint = function (codePoint) {
- return isNameStartCodePoint(codePoint) || isDigit(codePoint) || codePoint === HYPHEN_MINUS;
- };
- var isNonPrintableCodePoint = function (codePoint) {
- return ((codePoint >= NULL && codePoint <= BACKSPACE) ||
- codePoint === LINE_TABULATION ||
- (codePoint >= SHIFT_OUT && codePoint <= INFORMATION_SEPARATOR_ONE) ||
- codePoint === DELETE);
- };
- var isValidEscape = function (c1, c2) {
- if (c1 !== REVERSE_SOLIDUS) {
- return false;
- }
- return c2 !== LINE_FEED;
- };
- var isIdentifierStart = function (c1, c2, c3) {
- if (c1 === HYPHEN_MINUS) {
- return isNameStartCodePoint(c2) || isValidEscape(c2, c3);
- }
- else if (isNameStartCodePoint(c1)) {
- return true;
- }
- else if (c1 === REVERSE_SOLIDUS && isValidEscape(c1, c2)) {
- return true;
- }
- return false;
- };
- var isNumberStart = function (c1, c2, c3) {
- if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) {
- if (isDigit(c2)) {
- return true;
- }
- return c2 === FULL_STOP && isDigit(c3);
- }
- if (c1 === FULL_STOP) {
- return isDigit(c2);
- }
- return isDigit(c1);
- };
- var stringToNumber = function (codePoints) {
- var c = 0;
- var sign = 1;
- if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) {
- if (codePoints[c] === HYPHEN_MINUS) {
- sign = -1;
- }
- c++;
- }
- var integers = [];
- while (isDigit(codePoints[c])) {
- integers.push(codePoints[c++]);
- }
- var int = integers.length ? parseInt(fromCodePoint$1.apply(void 0, integers), 10) : 0;
- if (codePoints[c] === FULL_STOP) {
- c++;
- }
- var fraction = [];
- while (isDigit(codePoints[c])) {
- fraction.push(codePoints[c++]);
- }
- var fracd = fraction.length;
- var frac = fracd ? parseInt(fromCodePoint$1.apply(void 0, fraction), 10) : 0;
- if (codePoints[c] === E || codePoints[c] === e) {
- c++;
- }
- var expsign = 1;
- if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) {
- if (codePoints[c] === HYPHEN_MINUS) {
- expsign = -1;
- }
- c++;
- }
- var exponent = [];
- while (isDigit(codePoints[c])) {
- exponent.push(codePoints[c++]);
- }
- var exp = exponent.length ? parseInt(fromCodePoint$1.apply(void 0, exponent), 10) : 0;
- return sign * (int + frac * Math.pow(10, -fracd)) * Math.pow(10, expsign * exp);
- };
- var LEFT_PARENTHESIS_TOKEN = {
- type: 2 /* LEFT_PARENTHESIS_TOKEN */
- };
- var RIGHT_PARENTHESIS_TOKEN = {
- type: 3 /* RIGHT_PARENTHESIS_TOKEN */
- };
- var COMMA_TOKEN = { type: 4 /* COMMA_TOKEN */ };
- var SUFFIX_MATCH_TOKEN = { type: 13 /* SUFFIX_MATCH_TOKEN */ };
- var PREFIX_MATCH_TOKEN = { type: 8 /* PREFIX_MATCH_TOKEN */ };
- var COLUMN_TOKEN = { type: 21 /* COLUMN_TOKEN */ };
- var DASH_MATCH_TOKEN = { type: 9 /* DASH_MATCH_TOKEN */ };
- var INCLUDE_MATCH_TOKEN = { type: 10 /* INCLUDE_MATCH_TOKEN */ };
- var LEFT_CURLY_BRACKET_TOKEN = {
- type: 11 /* LEFT_CURLY_BRACKET_TOKEN */
- };
- var RIGHT_CURLY_BRACKET_TOKEN = {
- type: 12 /* RIGHT_CURLY_BRACKET_TOKEN */
- };
- var SUBSTRING_MATCH_TOKEN = { type: 14 /* SUBSTRING_MATCH_TOKEN */ };
- var BAD_URL_TOKEN = { type: 23 /* BAD_URL_TOKEN */ };
- var BAD_STRING_TOKEN = { type: 1 /* BAD_STRING_TOKEN */ };
- var CDO_TOKEN = { type: 25 /* CDO_TOKEN */ };
- var CDC_TOKEN = { type: 24 /* CDC_TOKEN */ };
- var COLON_TOKEN = { type: 26 /* COLON_TOKEN */ };
- var SEMICOLON_TOKEN = { type: 27 /* SEMICOLON_TOKEN */ };
- var LEFT_SQUARE_BRACKET_TOKEN = {
- type: 28 /* LEFT_SQUARE_BRACKET_TOKEN */
- };
- var RIGHT_SQUARE_BRACKET_TOKEN = {
- type: 29 /* RIGHT_SQUARE_BRACKET_TOKEN */
- };
- var WHITESPACE_TOKEN = { type: 31 /* WHITESPACE_TOKEN */ };
- var EOF_TOKEN = { type: 32 /* EOF_TOKEN */ };
- var Tokenizer = /** @class */ (function () {
- function Tokenizer() {
- this._value = [];
- }
- Tokenizer.prototype.write = function (chunk) {
- this._value = this._value.concat(toCodePoints$1(chunk));
- };
- Tokenizer.prototype.read = function () {
- var tokens = [];
- var token = this.consumeToken();
- while (token !== EOF_TOKEN) {
- tokens.push(token);
- token = this.consumeToken();
- }
- return tokens;
- };
- Tokenizer.prototype.consumeToken = function () {
- var codePoint = this.consumeCodePoint();
- switch (codePoint) {
- case QUOTATION_MARK:
- return this.consumeStringToken(QUOTATION_MARK);
- case NUMBER_SIGN:
- var c1 = this.peekCodePoint(0);
- var c2 = this.peekCodePoint(1);
- var c3 = this.peekCodePoint(2);
- if (isNameCodePoint(c1) || isValidEscape(c2, c3)) {
- var flags = isIdentifierStart(c1, c2, c3) ? FLAG_ID : FLAG_UNRESTRICTED;
- var value = this.consumeName();
- return { type: 5 /* HASH_TOKEN */, value: value, flags: flags };
- }
- break;
- case DOLLAR_SIGN:
- if (this.peekCodePoint(0) === EQUALS_SIGN) {
- this.consumeCodePoint();
- return SUFFIX_MATCH_TOKEN;
- }
- break;
- case APOSTROPHE:
- return this.consumeStringToken(APOSTROPHE);
- case LEFT_PARENTHESIS:
- return LEFT_PARENTHESIS_TOKEN;
- case RIGHT_PARENTHESIS:
- return RIGHT_PARENTHESIS_TOKEN;
- case ASTERISK:
- if (this.peekCodePoint(0) === EQUALS_SIGN) {
- this.consumeCodePoint();
- return SUBSTRING_MATCH_TOKEN;
- }
- break;
- case PLUS_SIGN:
- if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeNumericToken();
- }
- break;
- case COMMA:
- return COMMA_TOKEN;
- case HYPHEN_MINUS:
- var e1 = codePoint;
- var e2 = this.peekCodePoint(0);
- var e3 = this.peekCodePoint(1);
- if (isNumberStart(e1, e2, e3)) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeNumericToken();
- }
- if (isIdentifierStart(e1, e2, e3)) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeIdentLikeToken();
- }
- if (e2 === HYPHEN_MINUS && e3 === GREATER_THAN_SIGN) {
- this.consumeCodePoint();
- this.consumeCodePoint();
- return CDC_TOKEN;
- }
- break;
- case FULL_STOP:
- if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeNumericToken();
- }
- break;
- case SOLIDUS:
- if (this.peekCodePoint(0) === ASTERISK) {
- this.consumeCodePoint();
- while (true) {
- var c = this.consumeCodePoint();
- if (c === ASTERISK) {
- c = this.consumeCodePoint();
- if (c === SOLIDUS) {
- return this.consumeToken();
- }
- }
- if (c === EOF) {
- return this.consumeToken();
- }
- }
- }
- break;
- case COLON:
- return COLON_TOKEN;
- case SEMICOLON:
- return SEMICOLON_TOKEN;
- case LESS_THAN_SIGN:
- if (this.peekCodePoint(0) === EXCLAMATION_MARK &&
- this.peekCodePoint(1) === HYPHEN_MINUS &&
- this.peekCodePoint(2) === HYPHEN_MINUS) {
- this.consumeCodePoint();
- this.consumeCodePoint();
- return CDO_TOKEN;
- }
- break;
- case COMMERCIAL_AT:
- var a1 = this.peekCodePoint(0);
- var a2 = this.peekCodePoint(1);
- var a3 = this.peekCodePoint(2);
- if (isIdentifierStart(a1, a2, a3)) {
- var value = this.consumeName();
- return { type: 7 /* AT_KEYWORD_TOKEN */, value: value };
- }
- break;
- case LEFT_SQUARE_BRACKET:
- return LEFT_SQUARE_BRACKET_TOKEN;
- case REVERSE_SOLIDUS:
- if (isValidEscape(codePoint, this.peekCodePoint(0))) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeIdentLikeToken();
- }
- break;
- case RIGHT_SQUARE_BRACKET:
- return RIGHT_SQUARE_BRACKET_TOKEN;
- case CIRCUMFLEX_ACCENT:
- if (this.peekCodePoint(0) === EQUALS_SIGN) {
- this.consumeCodePoint();
- return PREFIX_MATCH_TOKEN;
- }
- break;
- case LEFT_CURLY_BRACKET:
- return LEFT_CURLY_BRACKET_TOKEN;
- case RIGHT_CURLY_BRACKET:
- return RIGHT_CURLY_BRACKET_TOKEN;
- case u:
- case U:
- var u1 = this.peekCodePoint(0);
- var u2 = this.peekCodePoint(1);
- if (u1 === PLUS_SIGN && (isHex(u2) || u2 === QUESTION_MARK)) {
- this.consumeCodePoint();
- this.consumeUnicodeRangeToken();
- }
- this.reconsumeCodePoint(codePoint);
- return this.consumeIdentLikeToken();
- case VERTICAL_LINE:
- if (this.peekCodePoint(0) === EQUALS_SIGN) {
- this.consumeCodePoint();
- return DASH_MATCH_TOKEN;
- }
- if (this.peekCodePoint(0) === VERTICAL_LINE) {
- this.consumeCodePoint();
- return COLUMN_TOKEN;
- }
- break;
- case TILDE:
- if (this.peekCodePoint(0) === EQUALS_SIGN) {
- this.consumeCodePoint();
- return INCLUDE_MATCH_TOKEN;
- }
- break;
- case EOF:
- return EOF_TOKEN;
- }
- if (isWhiteSpace(codePoint)) {
- this.consumeWhiteSpace();
- return WHITESPACE_TOKEN;
- }
- if (isDigit(codePoint)) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeNumericToken();
- }
- if (isNameStartCodePoint(codePoint)) {
- this.reconsumeCodePoint(codePoint);
- return this.consumeIdentLikeToken();
- }
- return { type: 6 /* DELIM_TOKEN */, value: fromCodePoint$1(codePoint) };
- };
- Tokenizer.prototype.consumeCodePoint = function () {
- var value = this._value.shift();
- return typeof value === 'undefined' ? -1 : value;
- };
- Tokenizer.prototype.reconsumeCodePoint = function (codePoint) {
- this._value.unshift(codePoint);
- };
- Tokenizer.prototype.peekCodePoint = function (delta) {
- if (delta >= this._value.length) {
- return -1;
- }
- return this._value[delta];
- };
- Tokenizer.prototype.consumeUnicodeRangeToken = function () {
- var digits = [];
- var codePoint = this.consumeCodePoint();
- while (isHex(codePoint) && digits.length < 6) {
- digits.push(codePoint);
- codePoint = this.consumeCodePoint();
- }
- var questionMarks = false;
- while (codePoint === QUESTION_MARK && digits.length < 6) {
- digits.push(codePoint);
- codePoint = this.consumeCodePoint();
- questionMarks = true;
- }
- if (questionMarks) {
- var start_1 = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? ZERO : digit); })), 16);
- var end = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? F : digit); })), 16);
- return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start_1, end: end };
- }
- var start = parseInt(fromCodePoint$1.apply(void 0, digits), 16);
- if (this.peekCodePoint(0) === HYPHEN_MINUS && isHex(this.peekCodePoint(1))) {
- this.consumeCodePoint();
- codePoint = this.consumeCodePoint();
- var endDigits = [];
- while (isHex(codePoint) && endDigits.length < 6) {
- endDigits.push(codePoint);
- codePoint = this.consumeCodePoint();
- }
- var end = parseInt(fromCodePoint$1.apply(void 0, endDigits), 16);
- return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: end };
- }
- else {
- return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: start };
- }
- };
- Tokenizer.prototype.consumeIdentLikeToken = function () {
- var value = this.consumeName();
- if (value.toLowerCase() === 'url' && this.peekCodePoint(0) === LEFT_PARENTHESIS) {
- this.consumeCodePoint();
- return this.consumeUrlToken();
- }
- else if (this.peekCodePoint(0) === LEFT_PARENTHESIS) {
- this.consumeCodePoint();
- return { type: 19 /* FUNCTION_TOKEN */, value: value };
- }
- return { type: 20 /* IDENT_TOKEN */, value: value };
- };
- Tokenizer.prototype.consumeUrlToken = function () {
- var value = [];
- this.consumeWhiteSpace();
- if (this.peekCodePoint(0) === EOF) {
- return { type: 22 /* URL_TOKEN */, value: '' };
- }
- var next = this.peekCodePoint(0);
- if (next === APOSTROPHE || next === QUOTATION_MARK) {
- var stringToken = this.consumeStringToken(this.consumeCodePoint());
- if (stringToken.type === 0 /* STRING_TOKEN */) {
- this.consumeWhiteSpace();
- if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) {
- this.consumeCodePoint();
- return { type: 22 /* URL_TOKEN */, value: stringToken.value };
- }
- }
- this.consumeBadUrlRemnants();
- return BAD_URL_TOKEN;
- }
- while (true) {
- var codePoint = this.consumeCodePoint();
- if (codePoint === EOF || codePoint === RIGHT_PARENTHESIS) {
- return { type: 22 /* URL_TOKEN */, value: fromCodePoint$1.apply(void 0, value) };
- }
- else if (isWhiteSpace(codePoint)) {
- this.consumeWhiteSpace();
- if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) {
- this.consumeCodePoint();
- return { type: 22 /* URL_TOKEN */, value: fromCodePoint$1.apply(void 0, value) };
- }
- this.consumeBadUrlRemnants();
- return BAD_URL_TOKEN;
- }
- else if (codePoint === QUOTATION_MARK ||
- codePoint === APOSTROPHE ||
- codePoint === LEFT_PARENTHESIS ||
- isNonPrintableCodePoint(codePoint)) {
- this.consumeBadUrlRemnants();
- return BAD_URL_TOKEN;
- }
- else if (codePoint === REVERSE_SOLIDUS) {
- if (isValidEscape(codePoint, this.peekCodePoint(0))) {
- value.push(this.consumeEscapedCodePoint());
- }
- else {
- this.consumeBadUrlRemnants();
- return BAD_URL_TOKEN;
- }
- }
- else {
- value.push(codePoint);
- }
- }
- };
- Tokenizer.prototype.consumeWhiteSpace = function () {
- while (isWhiteSpace(this.peekCodePoint(0))) {
- this.consumeCodePoint();
- }
- };
- Tokenizer.prototype.consumeBadUrlRemnants = function () {
- while (true) {
- var codePoint = this.consumeCodePoint();
- if (codePoint === RIGHT_PARENTHESIS || codePoint === EOF) {
- return;
- }
- if (isValidEscape(codePoint, this.peekCodePoint(0))) {
- this.consumeEscapedCodePoint();
- }
- }
- };
- Tokenizer.prototype.consumeStringSlice = function (count) {
- var SLICE_STACK_SIZE = 50000;
- var value = '';
- while (count > 0) {
- var amount = Math.min(SLICE_STACK_SIZE, count);
- value += fromCodePoint$1.apply(void 0, this._value.splice(0, amount));
- count -= amount;
- }
- this._value.shift();
- return value;
- };
- Tokenizer.prototype.consumeStringToken = function (endingCodePoint) {
- var value = '';
- var i = 0;
- do {
- var codePoint = this._value[i];
- if (codePoint === EOF || codePoint === undefined || codePoint === endingCodePoint) {
- value += this.consumeStringSlice(i);
- return { type: 0 /* STRING_TOKEN */, value: value };
- }
- if (codePoint === LINE_FEED) {
- this._value.splice(0, i);
- return BAD_STRING_TOKEN;
- }
- if (codePoint === REVERSE_SOLIDUS) {
- var next = this._value[i + 1];
- if (next !== EOF && next !== undefined) {
- if (next === LINE_FEED) {
- value += this.consumeStringSlice(i);
- i = -1;
- this._value.shift();
- }
- else if (isValidEscape(codePoint, next)) {
- value += this.consumeStringSlice(i);
- value += fromCodePoint$1(this.consumeEscapedCodePoint());
- i = -1;
- }
- }
- }
- i++;
- } while (true);
- };
- Tokenizer.prototype.consumeNumber = function () {
- var repr = [];
- var type = FLAG_INTEGER;
- var c1 = this.peekCodePoint(0);
- if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) {
- repr.push(this.consumeCodePoint());
- }
- while (isDigit(this.peekCodePoint(0))) {
- repr.push(this.consumeCodePoint());
- }
- c1 = this.peekCodePoint(0);
- var c2 = this.peekCodePoint(1);
- if (c1 === FULL_STOP && isDigit(c2)) {
- repr.push(this.consumeCodePoint(), this.consumeCodePoint());
- type = FLAG_NUMBER;
- while (isDigit(this.peekCodePoint(0))) {
- repr.push(this.consumeCodePoint());
- }
- }
- c1 = this.peekCodePoint(0);
- c2 = this.peekCodePoint(1);
- var c3 = this.peekCodePoint(2);
- if ((c1 === E || c1 === e) && (((c2 === PLUS_SIGN || c2 === HYPHEN_MINUS) && isDigit(c3)) || isDigit(c2))) {
- repr.push(this.consumeCodePoint(), this.consumeCodePoint());
- type = FLAG_NUMBER;
- while (isDigit(this.peekCodePoint(0))) {
- repr.push(this.consumeCodePoint());
- }
- }
- return [stringToNumber(repr), type];
- };
- Tokenizer.prototype.consumeNumericToken = function () {
- var _a = this.consumeNumber(), number = _a[0], flags = _a[1];
- var c1 = this.peekCodePoint(0);
- var c2 = this.peekCodePoint(1);
- var c3 = this.peekCodePoint(2);
- if (isIdentifierStart(c1, c2, c3)) {
- var unit = this.consumeName();
- return { type: 15 /* DIMENSION_TOKEN */, number: number, flags: flags, unit: unit };
- }
- if (c1 === PERCENTAGE_SIGN) {
- this.consumeCodePoint();
- return { type: 16 /* PERCENTAGE_TOKEN */, number: number, flags: flags };
- }
- return { type: 17 /* NUMBER_TOKEN */, number: number, flags: flags };
- };
- Tokenizer.prototype.consumeEscapedCodePoint = function () {
- var codePoint = this.consumeCodePoint();
- if (isHex(codePoint)) {
- var hex = fromCodePoint$1(codePoint);
- while (isHex(this.peekCodePoint(0)) && hex.length < 6) {
- hex += fromCodePoint$1(this.consumeCodePoint());
- }
- if (isWhiteSpace(this.peekCodePoint(0))) {
- this.consumeCodePoint();
- }
- var hexCodePoint = parseInt(hex, 16);
- if (hexCodePoint === 0 || isSurrogateCodePoint(hexCodePoint) || hexCodePoint > 0x10ffff) {
- return REPLACEMENT_CHARACTER;
- }
- return hexCodePoint;
- }
- if (codePoint === EOF) {
- return REPLACEMENT_CHARACTER;
- }
- return codePoint;
- };
- Tokenizer.prototype.consumeName = function () {
- var result = '';
- while (true) {
- var codePoint = this.consumeCodePoint();
- if (isNameCodePoint(codePoint)) {
- result += fromCodePoint$1(codePoint);
- }
- else if (isValidEscape(codePoint, this.peekCodePoint(0))) {
- result += fromCodePoint$1(this.consumeEscapedCodePoint());
- }
- else {
- this.reconsumeCodePoint(codePoint);
- return result;
- }
- }
- };
- return Tokenizer;
- }());
-
- var Parser = /** @class */ (function () {
- function Parser(tokens) {
- this._tokens = tokens;
- }
- Parser.create = function (value) {
- var tokenizer = new Tokenizer();
- tokenizer.write(value);
- return new Parser(tokenizer.read());
- };
- Parser.parseValue = function (value) {
- return Parser.create(value).parseComponentValue();
- };
- Parser.parseValues = function (value) {
- return Parser.create(value).parseComponentValues();
- };
- Parser.prototype.parseComponentValue = function () {
- var token = this.consumeToken();
- while (token.type === 31 /* WHITESPACE_TOKEN */) {
- token = this.consumeToken();
- }
- if (token.type === 32 /* EOF_TOKEN */) {
- throw new SyntaxError("Error parsing CSS component value, unexpected EOF");
- }
- this.reconsumeToken(token);
- var value = this.consumeComponentValue();
- do {
- token = this.consumeToken();
- } while (token.type === 31 /* WHITESPACE_TOKEN */);
- if (token.type === 32 /* EOF_TOKEN */) {
- return value;
- }
- throw new SyntaxError("Error parsing CSS component value, multiple values found when expecting only one");
- };
- Parser.prototype.parseComponentValues = function () {
- var values = [];
- while (true) {
- var value = this.consumeComponentValue();
- if (value.type === 32 /* EOF_TOKEN */) {
- return values;
- }
-            values.push(value);
- }
- };
- Parser.prototype.consumeComponentValue = function () {
- var token = this.consumeToken();
- switch (token.type) {
- case 11 /* LEFT_CURLY_BRACKET_TOKEN */:
- case 28 /* LEFT_SQUARE_BRACKET_TOKEN */:
- case 2 /* LEFT_PARENTHESIS_TOKEN */:
- return this.consumeSimpleBlock(token.type);
- case 19 /* FUNCTION_TOKEN */:
- return this.consumeFunction(token);
- }
- return token;
- };
- Parser.prototype.consumeSimpleBlock = function (type) {
- var block = { type: type, values: [] };
- var token = this.consumeToken();
- while (true) {
- if (token.type === 32 /* EOF_TOKEN */ || isEndingTokenFor(token, type)) {
- return block;
- }
- this.reconsumeToken(token);
- block.values.push(this.consumeComponentValue());
- token = this.consumeToken();
- }
- };
- Parser.prototype.consumeFunction = function (functionToken) {
- var cssFunction = {
- name: functionToken.value,
- values: [],
- type: 18 /* FUNCTION */
- };
- while (true) {
- var token = this.consumeToken();
- if (token.type === 32 /* EOF_TOKEN */ || token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */) {
- return cssFunction;
- }
- this.reconsumeToken(token);
- cssFunction.values.push(this.consumeComponentValue());
- }
- };
- Parser.prototype.consumeToken = function () {
- var token = this._tokens.shift();
- return typeof token === 'undefined' ? EOF_TOKEN : token;
- };
- Parser.prototype.reconsumeToken = function (token) {
- this._tokens.unshift(token);
- };
- return Parser;
- }());
- var isDimensionToken = function (token) { return token.type === 15 /* DIMENSION_TOKEN */; };
- var isNumberToken = function (token) { return token.type === 17 /* NUMBER_TOKEN */; };
- var isIdentToken = function (token) { return token.type === 20 /* IDENT_TOKEN */; };
- var isStringToken = function (token) { return token.type === 0 /* STRING_TOKEN */; };
- var isIdentWithValue = function (token, value) {
- return isIdentToken(token) && token.value === value;
- };
- var nonWhiteSpace = function (token) { return token.type !== 31 /* WHITESPACE_TOKEN */; };
- var nonFunctionArgSeparator = function (token) {
- return token.type !== 31 /* WHITESPACE_TOKEN */ && token.type !== 4 /* COMMA_TOKEN */;
- };
- var parseFunctionArgs = function (tokens) {
- var args = [];
- var arg = [];
- tokens.forEach(function (token) {
- if (token.type === 4 /* COMMA_TOKEN */) {
- if (arg.length === 0) {
- throw new Error("Error parsing function args, zero tokens for arg");
- }
- args.push(arg);
- arg = [];
- return;
- }
- if (token.type !== 31 /* WHITESPACE_TOKEN */) {
- arg.push(token);
- }
- });
- if (arg.length) {
- args.push(arg);
- }
- return args;
- };
- var isEndingTokenFor = function (token, type) {
- if (type === 11 /* LEFT_CURLY_BRACKET_TOKEN */ && token.type === 12 /* RIGHT_CURLY_BRACKET_TOKEN */) {
- return true;
- }
- if (type === 28 /* LEFT_SQUARE_BRACKET_TOKEN */ && token.type === 29 /* RIGHT_SQUARE_BRACKET_TOKEN */) {
- return true;
- }
- return type === 2 /* LEFT_PARENTHESIS_TOKEN */ && token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */;
- };
-
- var isLength = function (token) {
- return token.type === 17 /* NUMBER_TOKEN */ || token.type === 15 /* DIMENSION_TOKEN */;
- };
-
- var isLengthPercentage = function (token) {
- return token.type === 16 /* PERCENTAGE_TOKEN */ || isLength(token);
- };
- var parseLengthPercentageTuple = function (tokens) {
- return tokens.length > 1 ? [tokens[0], tokens[1]] : [tokens[0]];
- };
- var ZERO_LENGTH = {
- type: 17 /* NUMBER_TOKEN */,
- number: 0,
- flags: FLAG_INTEGER
- };
- var FIFTY_PERCENT = {
- type: 16 /* PERCENTAGE_TOKEN */,
- number: 50,
- flags: FLAG_INTEGER
- };
- var HUNDRED_PERCENT = {
- type: 16 /* PERCENTAGE_TOKEN */,
- number: 100,
- flags: FLAG_INTEGER
- };
- var getAbsoluteValueForTuple = function (tuple, width, height) {
- var x = tuple[0], y = tuple[1];
- return [getAbsoluteValue(x, width), getAbsoluteValue(typeof y !== 'undefined' ? y : x, height)];
- };
- var getAbsoluteValue = function (token, parent) {
- if (token.type === 16 /* PERCENTAGE_TOKEN */) {
- return (token.number / 100) * parent;
- }
- if (isDimensionToken(token)) {
- switch (token.unit) {
- case 'rem':
- case 'em':
- return 16 * token.number; // TODO use correct font-size
- case 'px':
- default:
- return token.number;
- }
- }
- return token.number;
- };
-
- var DEG = 'deg';
- var GRAD = 'grad';
- var RAD = 'rad';
- var TURN = 'turn';
- var angle = {
- name: 'angle',
- parse: function (_context, value) {
- if (value.type === 15 /* DIMENSION_TOKEN */) {
- switch (value.unit) {
- case DEG:
- return (Math.PI * value.number) / 180;
- case GRAD:
- return (Math.PI / 200) * value.number;
- case RAD:
- return value.number;
- case TURN:
- return Math.PI * 2 * value.number;
- }
- }
- throw new Error("Unsupported angle type");
- }
- };
- var isAngle = function (value) {
- if (value.type === 15 /* DIMENSION_TOKEN */) {
- if (value.unit === DEG || value.unit === GRAD || value.unit === RAD || value.unit === TURN) {
- return true;
- }
- }
- return false;
- };
- var parseNamedSide = function (tokens) {
- var sideOrCorner = tokens
- .filter(isIdentToken)
- .map(function (ident) { return ident.value; })
- .join(' ');
- switch (sideOrCorner) {
- case 'to bottom right':
- case 'to right bottom':
- case 'left top':
- case 'top left':
- return [ZERO_LENGTH, ZERO_LENGTH];
- case 'to top':
- case 'bottom':
- return deg(0);
- case 'to bottom left':
- case 'to left bottom':
- case 'right top':
- case 'top right':
- return [ZERO_LENGTH, HUNDRED_PERCENT];
- case 'to right':
- case 'left':
- return deg(90);
- case 'to top left':
- case 'to left top':
- case 'right bottom':
- case 'bottom right':
- return [HUNDRED_PERCENT, HUNDRED_PERCENT];
- case 'to bottom':
- case 'top':
- return deg(180);
- case 'to top right':
- case 'to right top':
- case 'left bottom':
- case 'bottom left':
- return [HUNDRED_PERCENT, ZERO_LENGTH];
- case 'to left':
- case 'right':
- return deg(270);
- }
- return 0;
- };
- var deg = function (deg) { return (Math.PI * deg) / 180; };
-
- var color$1 = {
- name: 'color',
- parse: function (context, value) {
- if (value.type === 18 /* FUNCTION */) {
- var colorFunction = SUPPORTED_COLOR_FUNCTIONS[value.name];
- if (typeof colorFunction === 'undefined') {
- throw new Error("Attempting to parse an unsupported color function \"" + value.name + "\"");
- }
- return colorFunction(context, value.values);
- }
- if (value.type === 5 /* HASH_TOKEN */) {
- if (value.value.length === 3) {
- var r = value.value.substring(0, 1);
- var g = value.value.substring(1, 2);
- var b = value.value.substring(2, 3);
- return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), 1);
- }
- if (value.value.length === 4) {
- var r = value.value.substring(0, 1);
- var g = value.value.substring(1, 2);
- var b = value.value.substring(2, 3);
- var a = value.value.substring(3, 4);
- return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), parseInt(a + a, 16) / 255);
- }
- if (value.value.length === 6) {
- var r = value.value.substring(0, 2);
- var g = value.value.substring(2, 4);
- var b = value.value.substring(4, 6);
- return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), 1);
- }
- if (value.value.length === 8) {
- var r = value.value.substring(0, 2);
- var g = value.value.substring(2, 4);
- var b = value.value.substring(4, 6);
- var a = value.value.substring(6, 8);
- return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), parseInt(a, 16) / 255);
- }
- }
- if (value.type === 20 /* IDENT_TOKEN */) {
- var namedColor = COLORS[value.value.toUpperCase()];
- if (typeof namedColor !== 'undefined') {
- return namedColor;
- }
- }
- return COLORS.TRANSPARENT;
- }
- };
- var isTransparent = function (color) { return (0xff & color) === 0; };
- var asString = function (color) {
- var alpha = 0xff & color;
- var blue = 0xff & (color >> 8);
- var green = 0xff & (color >> 16);
- var red = 0xff & (color >> 24);
- return alpha < 255 ? "rgba(" + red + "," + green + "," + blue + "," + alpha / 255 + ")" : "rgb(" + red + "," + green + "," + blue + ")";
- };
- var pack = function (r, g, b, a) {
- return ((r << 24) | (g << 16) | (b << 8) | (Math.round(a * 255) << 0)) >>> 0;
- };
- var getTokenColorValue = function (token, i) {
- if (token.type === 17 /* NUMBER_TOKEN */) {
- return token.number;
- }
- if (token.type === 16 /* PERCENTAGE_TOKEN */) {
- var max = i === 3 ? 1 : 255;
- return i === 3 ? (token.number / 100) * max : Math.round((token.number / 100) * max);
- }
- return 0;
- };
- var rgb = function (_context, args) {
- var tokens = args.filter(nonFunctionArgSeparator);
- if (tokens.length === 3) {
- var _a = tokens.map(getTokenColorValue), r = _a[0], g = _a[1], b = _a[2];
- return pack(r, g, b, 1);
- }
- if (tokens.length === 4) {
- var _b = tokens.map(getTokenColorValue), r = _b[0], g = _b[1], b = _b[2], a = _b[3];
- return pack(r, g, b, a);
- }
- return 0;
- };
- function hue2rgb(t1, t2, hue) {
- if (hue < 0) {
- hue += 1;
- }
- if (hue >= 1) {
- hue -= 1;
- }
- if (hue < 1 / 6) {
- return (t2 - t1) * hue * 6 + t1;
- }
- else if (hue < 1 / 2) {
- return t2;
- }
- else if (hue < 2 / 3) {
- return (t2 - t1) * 6 * (2 / 3 - hue) + t1;
- }
- else {
- return t1;
- }
- }
- var hsl = function (context, args) {
- var tokens = args.filter(nonFunctionArgSeparator);
- var hue = tokens[0], saturation = tokens[1], lightness = tokens[2], alpha = tokens[3];
- var h = (hue.type === 17 /* NUMBER_TOKEN */ ? deg(hue.number) : angle.parse(context, hue)) / (Math.PI * 2);
- var s = isLengthPercentage(saturation) ? saturation.number / 100 : 0;
- var l = isLengthPercentage(lightness) ? lightness.number / 100 : 0;
- var a = typeof alpha !== 'undefined' && isLengthPercentage(alpha) ? getAbsoluteValue(alpha, 1) : 1;
- if (s === 0) {
-            return pack(l * 255, l * 255, l * 255, a);
- }
- var t2 = l <= 0.5 ? l * (s + 1) : l + s - l * s;
- var t1 = l * 2 - t2;
- var r = hue2rgb(t1, t2, h + 1 / 3);
- var g = hue2rgb(t1, t2, h);
- var b = hue2rgb(t1, t2, h - 1 / 3);
- return pack(r * 255, g * 255, b * 255, a);
- };
- var SUPPORTED_COLOR_FUNCTIONS = {
- hsl: hsl,
- hsla: hsl,
- rgb: rgb,
- rgba: rgb
- };
- var parseColor = function (context, value) {
- return color$1.parse(context, Parser.create(value).parseComponentValue());
- };
- var COLORS = {
- ALICEBLUE: 0xf0f8ffff,
- ANTIQUEWHITE: 0xfaebd7ff,
- AQUA: 0x00ffffff,
- AQUAMARINE: 0x7fffd4ff,
- AZURE: 0xf0ffffff,
- BEIGE: 0xf5f5dcff,
- BISQUE: 0xffe4c4ff,
- BLACK: 0x000000ff,
- BLANCHEDALMOND: 0xffebcdff,
- BLUE: 0x0000ffff,
- BLUEVIOLET: 0x8a2be2ff,
- BROWN: 0xa52a2aff,
- BURLYWOOD: 0xdeb887ff,
- CADETBLUE: 0x5f9ea0ff,
- CHARTREUSE: 0x7fff00ff,
- CHOCOLATE: 0xd2691eff,
- CORAL: 0xff7f50ff,
- CORNFLOWERBLUE: 0x6495edff,
- CORNSILK: 0xfff8dcff,
- CRIMSON: 0xdc143cff,
- CYAN: 0x00ffffff,
- DARKBLUE: 0x00008bff,
- DARKCYAN: 0x008b8bff,
-        DARKGOLDENROD: 0xb8860bff,
- DARKGRAY: 0xa9a9a9ff,
- DARKGREEN: 0x006400ff,
- DARKGREY: 0xa9a9a9ff,
- DARKKHAKI: 0xbdb76bff,
- DARKMAGENTA: 0x8b008bff,
- DARKOLIVEGREEN: 0x556b2fff,
- DARKORANGE: 0xff8c00ff,
- DARKORCHID: 0x9932ccff,
- DARKRED: 0x8b0000ff,
- DARKSALMON: 0xe9967aff,
- DARKSEAGREEN: 0x8fbc8fff,
- DARKSLATEBLUE: 0x483d8bff,
- DARKSLATEGRAY: 0x2f4f4fff,
- DARKSLATEGREY: 0x2f4f4fff,
- DARKTURQUOISE: 0x00ced1ff,
- DARKVIOLET: 0x9400d3ff,
- DEEPPINK: 0xff1493ff,
- DEEPSKYBLUE: 0x00bfffff,
- DIMGRAY: 0x696969ff,
- DIMGREY: 0x696969ff,
- DODGERBLUE: 0x1e90ffff,
- FIREBRICK: 0xb22222ff,
- FLORALWHITE: 0xfffaf0ff,
- FORESTGREEN: 0x228b22ff,
- FUCHSIA: 0xff00ffff,
- GAINSBORO: 0xdcdcdcff,
- GHOSTWHITE: 0xf8f8ffff,
- GOLD: 0xffd700ff,
- GOLDENROD: 0xdaa520ff,
- GRAY: 0x808080ff,
- GREEN: 0x008000ff,
- GREENYELLOW: 0xadff2fff,
- GREY: 0x808080ff,
- HONEYDEW: 0xf0fff0ff,
- HOTPINK: 0xff69b4ff,
- INDIANRED: 0xcd5c5cff,
- INDIGO: 0x4b0082ff,
- IVORY: 0xfffff0ff,
- KHAKI: 0xf0e68cff,
- LAVENDER: 0xe6e6faff,
- LAVENDERBLUSH: 0xfff0f5ff,
- LAWNGREEN: 0x7cfc00ff,
- LEMONCHIFFON: 0xfffacdff,
- LIGHTBLUE: 0xadd8e6ff,
- LIGHTCORAL: 0xf08080ff,
- LIGHTCYAN: 0xe0ffffff,
- LIGHTGOLDENRODYELLOW: 0xfafad2ff,
- LIGHTGRAY: 0xd3d3d3ff,
- LIGHTGREEN: 0x90ee90ff,
- LIGHTGREY: 0xd3d3d3ff,
- LIGHTPINK: 0xffb6c1ff,
- LIGHTSALMON: 0xffa07aff,
- LIGHTSEAGREEN: 0x20b2aaff,
- LIGHTSKYBLUE: 0x87cefaff,
- LIGHTSLATEGRAY: 0x778899ff,
- LIGHTSLATEGREY: 0x778899ff,
- LIGHTSTEELBLUE: 0xb0c4deff,
- LIGHTYELLOW: 0xffffe0ff,
- LIME: 0x00ff00ff,
- LIMEGREEN: 0x32cd32ff,
- LINEN: 0xfaf0e6ff,
- MAGENTA: 0xff00ffff,
- MAROON: 0x800000ff,
- MEDIUMAQUAMARINE: 0x66cdaaff,
- MEDIUMBLUE: 0x0000cdff,
- MEDIUMORCHID: 0xba55d3ff,
- MEDIUMPURPLE: 0x9370dbff,
- MEDIUMSEAGREEN: 0x3cb371ff,
- MEDIUMSLATEBLUE: 0x7b68eeff,
- MEDIUMSPRINGGREEN: 0x00fa9aff,
- MEDIUMTURQUOISE: 0x48d1ccff,
- MEDIUMVIOLETRED: 0xc71585ff,
- MIDNIGHTBLUE: 0x191970ff,
- MINTCREAM: 0xf5fffaff,
- MISTYROSE: 0xffe4e1ff,
- MOCCASIN: 0xffe4b5ff,
- NAVAJOWHITE: 0xffdeadff,
- NAVY: 0x000080ff,
- OLDLACE: 0xfdf5e6ff,
- OLIVE: 0x808000ff,
- OLIVEDRAB: 0x6b8e23ff,
- ORANGE: 0xffa500ff,
- ORANGERED: 0xff4500ff,
- ORCHID: 0xda70d6ff,
- PALEGOLDENROD: 0xeee8aaff,
- PALEGREEN: 0x98fb98ff,
- PALETURQUOISE: 0xafeeeeff,
- PALEVIOLETRED: 0xdb7093ff,
- PAPAYAWHIP: 0xffefd5ff,
- PEACHPUFF: 0xffdab9ff,
- PERU: 0xcd853fff,
- PINK: 0xffc0cbff,
- PLUM: 0xdda0ddff,
- POWDERBLUE: 0xb0e0e6ff,
- PURPLE: 0x800080ff,
- REBECCAPURPLE: 0x663399ff,
- RED: 0xff0000ff,
- ROSYBROWN: 0xbc8f8fff,
- ROYALBLUE: 0x4169e1ff,
- SADDLEBROWN: 0x8b4513ff,
- SALMON: 0xfa8072ff,
- SANDYBROWN: 0xf4a460ff,
- SEAGREEN: 0x2e8b57ff,
- SEASHELL: 0xfff5eeff,
- SIENNA: 0xa0522dff,
- SILVER: 0xc0c0c0ff,
- SKYBLUE: 0x87ceebff,
- SLATEBLUE: 0x6a5acdff,
- SLATEGRAY: 0x708090ff,
- SLATEGREY: 0x708090ff,
- SNOW: 0xfffafaff,
- SPRINGGREEN: 0x00ff7fff,
- STEELBLUE: 0x4682b4ff,
- TAN: 0xd2b48cff,
- TEAL: 0x008080ff,
- THISTLE: 0xd8bfd8ff,
- TOMATO: 0xff6347ff,
- TRANSPARENT: 0x00000000,
- TURQUOISE: 0x40e0d0ff,
- VIOLET: 0xee82eeff,
- WHEAT: 0xf5deb3ff,
- WHITE: 0xffffffff,
- WHITESMOKE: 0xf5f5f5ff,
- YELLOW: 0xffff00ff,
- YELLOWGREEN: 0x9acd32ff
- };
-
- var backgroundClip = {
- name: 'background-clip',
- initialValue: 'border-box',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return tokens.map(function (token) {
- if (isIdentToken(token)) {
- switch (token.value) {
- case 'padding-box':
- return 1 /* PADDING_BOX */;
- case 'content-box':
- return 2 /* CONTENT_BOX */;
- }
- }
- return 0 /* BORDER_BOX */;
- });
- }
- };
-
- var backgroundColor = {
- name: "background-color",
- initialValue: 'transparent',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'color'
- };
-
- var parseColorStop = function (context, args) {
- var color = color$1.parse(context, args[0]);
- var stop = args[1];
- return stop && isLengthPercentage(stop) ? { color: color, stop: stop } : { color: color, stop: null };
- };
- var processColorStops = function (stops, lineLength) {
- var first = stops[0];
- var last = stops[stops.length - 1];
- if (first.stop === null) {
- first.stop = ZERO_LENGTH;
- }
- if (last.stop === null) {
- last.stop = HUNDRED_PERCENT;
- }
- var processStops = [];
- var previous = 0;
- for (var i = 0; i < stops.length; i++) {
- var stop_1 = stops[i].stop;
- if (stop_1 !== null) {
- var absoluteValue = getAbsoluteValue(stop_1, lineLength);
- if (absoluteValue > previous) {
- processStops.push(absoluteValue);
- }
- else {
- processStops.push(previous);
- }
- previous = absoluteValue;
- }
- else {
- processStops.push(null);
- }
- }
- var gapBegin = null;
- for (var i = 0; i < processStops.length; i++) {
- var stop_2 = processStops[i];
- if (stop_2 === null) {
- if (gapBegin === null) {
- gapBegin = i;
- }
- }
- else if (gapBegin !== null) {
- var gapLength = i - gapBegin;
- var beforeGap = processStops[gapBegin - 1];
- var gapValue = (stop_2 - beforeGap) / (gapLength + 1);
- for (var g = 1; g <= gapLength; g++) {
- processStops[gapBegin + g - 1] = gapValue * g;
- }
- gapBegin = null;
- }
- }
- return stops.map(function (_a, i) {
- var color = _a.color;
- return { color: color, stop: Math.max(Math.min(1, processStops[i] / lineLength), 0) };
- });
- };
- var getAngleFromCorner = function (corner, width, height) {
- var centerX = width / 2;
- var centerY = height / 2;
- var x = getAbsoluteValue(corner[0], width) - centerX;
- var y = centerY - getAbsoluteValue(corner[1], height);
- return (Math.atan2(y, x) + Math.PI * 2) % (Math.PI * 2);
- };
- var calculateGradientDirection = function (angle, width, height) {
- var radian = typeof angle === 'number' ? angle : getAngleFromCorner(angle, width, height);
- var lineLength = Math.abs(width * Math.sin(radian)) + Math.abs(height * Math.cos(radian));
- var halfWidth = width / 2;
- var halfHeight = height / 2;
- var halfLineLength = lineLength / 2;
- var yDiff = Math.sin(radian - Math.PI / 2) * halfLineLength;
- var xDiff = Math.cos(radian - Math.PI / 2) * halfLineLength;
- return [lineLength, halfWidth - xDiff, halfWidth + xDiff, halfHeight - yDiff, halfHeight + yDiff];
- };
- var distance = function (a, b) { return Math.sqrt(a * a + b * b); };
- var findCorner = function (width, height, x, y, closest) {
- var corners = [
- [0, 0],
- [0, height],
- [width, 0],
- [width, height]
- ];
- return corners.reduce(function (stat, corner) {
- var cx = corner[0], cy = corner[1];
- var d = distance(x - cx, y - cy);
- if (closest ? d < stat.optimumDistance : d > stat.optimumDistance) {
- return {
- optimumCorner: corner,
- optimumDistance: d
- };
- }
- return stat;
- }, {
- optimumDistance: closest ? Infinity : -Infinity,
- optimumCorner: null
- }).optimumCorner;
- };
- var calculateRadius = function (gradient, x, y, width, height) {
- var rx = 0;
- var ry = 0;
- switch (gradient.size) {
- case 0 /* CLOSEST_SIDE */:
-                // The ending shape is sized so that it exactly meets the side of the gradient box closest to the gradient’s center.
- // If the shape is an ellipse, it exactly meets the closest side in each dimension.
- if (gradient.shape === 0 /* CIRCLE */) {
- rx = ry = Math.min(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height));
- }
- else if (gradient.shape === 1 /* ELLIPSE */) {
- rx = Math.min(Math.abs(x), Math.abs(x - width));
- ry = Math.min(Math.abs(y), Math.abs(y - height));
- }
- break;
- case 2 /* CLOSEST_CORNER */:
-                // The ending shape is sized so that it passes through the corner of the gradient box closest to the gradient’s center.
- // If the shape is an ellipse, the ending shape is given the same aspect-ratio it would have if closest-side were specified.
- if (gradient.shape === 0 /* CIRCLE */) {
- rx = ry = Math.min(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height));
- }
- else if (gradient.shape === 1 /* ELLIPSE */) {
- // Compute the ratio ry/rx (which is to be the same as for "closest-side")
- var c = Math.min(Math.abs(y), Math.abs(y - height)) / Math.min(Math.abs(x), Math.abs(x - width));
- var _a = findCorner(width, height, x, y, true), cx = _a[0], cy = _a[1];
- rx = distance(cx - x, (cy - y) / c);
- ry = c * rx;
- }
- break;
- case 1 /* FARTHEST_SIDE */:
- // Same as closest-side, except the ending shape is sized based on the farthest side(s)
- if (gradient.shape === 0 /* CIRCLE */) {
- rx = ry = Math.max(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height));
- }
- else if (gradient.shape === 1 /* ELLIPSE */) {
- rx = Math.max(Math.abs(x), Math.abs(x - width));
- ry = Math.max(Math.abs(y), Math.abs(y - height));
- }
- break;
- case 3 /* FARTHEST_CORNER */:
- // Same as closest-corner, except the ending shape is sized based on the farthest corner.
- // If the shape is an ellipse, the ending shape is given the same aspect ratio it would have if farthest-side were specified.
- if (gradient.shape === 0 /* CIRCLE */) {
- rx = ry = Math.max(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height));
- }
- else if (gradient.shape === 1 /* ELLIPSE */) {
- // Compute the ratio ry/rx (which is to be the same as for "farthest-side")
- var c = Math.max(Math.abs(y), Math.abs(y - height)) / Math.max(Math.abs(x), Math.abs(x - width));
- var _b = findCorner(width, height, x, y, false), cx = _b[0], cy = _b[1];
- rx = distance(cx - x, (cy - y) / c);
- ry = c * rx;
- }
- break;
- }
- if (Array.isArray(gradient.size)) {
- rx = getAbsoluteValue(gradient.size[0], width);
- ry = gradient.size.length === 2 ? getAbsoluteValue(gradient.size[1], height) : rx;
- }
- return [rx, ry];
- };
-
- var linearGradient = function (context, tokens) {
- var angle$1 = deg(180);
- var stops = [];
- parseFunctionArgs(tokens).forEach(function (arg, i) {
- if (i === 0) {
- var firstToken = arg[0];
- if (firstToken.type === 20 /* IDENT_TOKEN */ && firstToken.value === 'to') {
- angle$1 = parseNamedSide(arg);
- return;
- }
- else if (isAngle(firstToken)) {
- angle$1 = angle.parse(context, firstToken);
- return;
- }
- }
- var colorStop = parseColorStop(context, arg);
- stops.push(colorStop);
- });
- return { angle: angle$1, stops: stops, type: 1 /* LINEAR_GRADIENT */ };
- };
-
- var prefixLinearGradient = function (context, tokens) {
- var angle$1 = deg(180);
- var stops = [];
- parseFunctionArgs(tokens).forEach(function (arg, i) {
- if (i === 0) {
- var firstToken = arg[0];
- if (firstToken.type === 20 /* IDENT_TOKEN */ &&
- ['top', 'left', 'right', 'bottom'].indexOf(firstToken.value) !== -1) {
- angle$1 = parseNamedSide(arg);
- return;
- }
- else if (isAngle(firstToken)) {
- angle$1 = (angle.parse(context, firstToken) + deg(270)) % deg(360);
- return;
- }
- }
- var colorStop = parseColorStop(context, arg);
- stops.push(colorStop);
- });
- return {
- angle: angle$1,
- stops: stops,
- type: 1 /* LINEAR_GRADIENT */
- };
- };
-
- var webkitGradient = function (context, tokens) {
- var angle = deg(180);
- var stops = [];
- var type = 1 /* LINEAR_GRADIENT */;
- var shape = 0 /* CIRCLE */;
- var size = 3 /* FARTHEST_CORNER */;
- var position = [];
- parseFunctionArgs(tokens).forEach(function (arg, i) {
- var firstToken = arg[0];
- if (i === 0) {
- if (isIdentToken(firstToken) && firstToken.value === 'linear') {
- type = 1 /* LINEAR_GRADIENT */;
- return;
- }
- else if (isIdentToken(firstToken) && firstToken.value === 'radial') {
- type = 2 /* RADIAL_GRADIENT */;
- return;
- }
- }
- if (firstToken.type === 18 /* FUNCTION */) {
- if (firstToken.name === 'from') {
- var color = color$1.parse(context, firstToken.values[0]);
- stops.push({ stop: ZERO_LENGTH, color: color });
- }
- else if (firstToken.name === 'to') {
- var color = color$1.parse(context, firstToken.values[0]);
- stops.push({ stop: HUNDRED_PERCENT, color: color });
- }
- else if (firstToken.name === 'color-stop') {
- var values = firstToken.values.filter(nonFunctionArgSeparator);
- if (values.length === 2) {
- var color = color$1.parse(context, values[1]);
- var stop_1 = values[0];
- if (isNumberToken(stop_1)) {
- stops.push({
- stop: { type: 16 /* PERCENTAGE_TOKEN */, number: stop_1.number * 100, flags: stop_1.flags },
- color: color
- });
- }
- }
- }
- }
- });
- return type === 1 /* LINEAR_GRADIENT */
- ? {
- angle: (angle + deg(180)) % deg(360),
- stops: stops,
- type: type
- }
- : { size: size, shape: shape, stops: stops, position: position, type: type };
- };
-
- var CLOSEST_SIDE = 'closest-side';
- var FARTHEST_SIDE = 'farthest-side';
- var CLOSEST_CORNER = 'closest-corner';
- var FARTHEST_CORNER = 'farthest-corner';
- var CIRCLE = 'circle';
- var ELLIPSE = 'ellipse';
- var COVER = 'cover';
- var CONTAIN = 'contain';
- var radialGradient = function (context, tokens) {
- var shape = 0 /* CIRCLE */;
- var size = 3 /* FARTHEST_CORNER */;
- var stops = [];
- var position = [];
- parseFunctionArgs(tokens).forEach(function (arg, i) {
- var isColorStop = true;
- if (i === 0) {
- var isAtPosition_1 = false;
- isColorStop = arg.reduce(function (acc, token) {
- if (isAtPosition_1) {
- if (isIdentToken(token)) {
- switch (token.value) {
- case 'center':
- position.push(FIFTY_PERCENT);
- return acc;
- case 'top':
- case 'left':
- position.push(ZERO_LENGTH);
- return acc;
- case 'right':
- case 'bottom':
- position.push(HUNDRED_PERCENT);
- return acc;
- }
- }
- else if (isLengthPercentage(token) || isLength(token)) {
- position.push(token);
- }
- }
- else if (isIdentToken(token)) {
- switch (token.value) {
- case CIRCLE:
- shape = 0 /* CIRCLE */;
- return false;
- case ELLIPSE:
- shape = 1 /* ELLIPSE */;
- return false;
- case 'at':
- isAtPosition_1 = true;
- return false;
- case CLOSEST_SIDE:
- size = 0 /* CLOSEST_SIDE */;
- return false;
- case COVER:
- case FARTHEST_SIDE:
- size = 1 /* FARTHEST_SIDE */;
- return false;
- case CONTAIN:
- case CLOSEST_CORNER:
- size = 2 /* CLOSEST_CORNER */;
- return false;
- case FARTHEST_CORNER:
- size = 3 /* FARTHEST_CORNER */;
- return false;
- }
- }
- else if (isLength(token) || isLengthPercentage(token)) {
- if (!Array.isArray(size)) {
- size = [];
- }
- size.push(token);
- return false;
- }
- return acc;
- }, isColorStop);
- }
- if (isColorStop) {
- var colorStop = parseColorStop(context, arg);
- stops.push(colorStop);
- }
- });
- return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ };
- };
-
- var prefixRadialGradient = function (context, tokens) {
- var shape = 0 /* CIRCLE */;
- var size = 3 /* FARTHEST_CORNER */;
- var stops = [];
- var position = [];
- parseFunctionArgs(tokens).forEach(function (arg, i) {
- var isColorStop = true;
- if (i === 0) {
- isColorStop = arg.reduce(function (acc, token) {
- if (isIdentToken(token)) {
- switch (token.value) {
- case 'center':
- position.push(FIFTY_PERCENT);
- return false;
- case 'top':
- case 'left':
- position.push(ZERO_LENGTH);
- return false;
- case 'right':
- case 'bottom':
- position.push(HUNDRED_PERCENT);
- return false;
- }
- }
- else if (isLengthPercentage(token) || isLength(token)) {
- position.push(token);
- return false;
- }
- return acc;
- }, isColorStop);
- }
- else if (i === 1) {
- isColorStop = arg.reduce(function (acc, token) {
- if (isIdentToken(token)) {
- switch (token.value) {
- case CIRCLE:
- shape = 0 /* CIRCLE */;
- return false;
- case ELLIPSE:
- shape = 1 /* ELLIPSE */;
- return false;
- case CONTAIN:
- case CLOSEST_SIDE:
- size = 0 /* CLOSEST_SIDE */;
- return false;
- case FARTHEST_SIDE:
- size = 1 /* FARTHEST_SIDE */;
- return false;
- case CLOSEST_CORNER:
- size = 2 /* CLOSEST_CORNER */;
- return false;
- case COVER:
- case FARTHEST_CORNER:
- size = 3 /* FARTHEST_CORNER */;
- return false;
- }
- }
- else if (isLength(token) || isLengthPercentage(token)) {
- if (!Array.isArray(size)) {
- size = [];
- }
- size.push(token);
- return false;
- }
- return acc;
- }, isColorStop);
- }
- if (isColorStop) {
- var colorStop = parseColorStop(context, arg);
- stops.push(colorStop);
- }
- });
- return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ };
- };
-
- var isLinearGradient = function (background) {
- return background.type === 1 /* LINEAR_GRADIENT */;
- };
- var isRadialGradient = function (background) {
- return background.type === 2 /* RADIAL_GRADIENT */;
- };
- var image = {
- name: 'image',
- parse: function (context, value) {
- if (value.type === 22 /* URL_TOKEN */) {
- var image_1 = { url: value.value, type: 0 /* URL */ };
- context.cache.addImage(value.value);
- return image_1;
- }
- if (value.type === 18 /* FUNCTION */) {
- var imageFunction = SUPPORTED_IMAGE_FUNCTIONS[value.name];
- if (typeof imageFunction === 'undefined') {
- throw new Error("Attempting to parse an unsupported image function \"" + value.name + "\"");
- }
- return imageFunction(context, value.values);
- }
- throw new Error("Unsupported image type " + value.type);
- }
- };
- function isSupportedImage(value) {
- return (!(value.type === 20 /* IDENT_TOKEN */ && value.value === 'none') &&
- (value.type !== 18 /* FUNCTION */ || !!SUPPORTED_IMAGE_FUNCTIONS[value.name]));
- }
- var SUPPORTED_IMAGE_FUNCTIONS = {
- 'linear-gradient': linearGradient,
- '-moz-linear-gradient': prefixLinearGradient,
- '-ms-linear-gradient': prefixLinearGradient,
- '-o-linear-gradient': prefixLinearGradient,
- '-webkit-linear-gradient': prefixLinearGradient,
- 'radial-gradient': radialGradient,
- '-moz-radial-gradient': prefixRadialGradient,
- '-ms-radial-gradient': prefixRadialGradient,
- '-o-radial-gradient': prefixRadialGradient,
- '-webkit-radial-gradient': prefixRadialGradient,
- '-webkit-gradient': webkitGradient
- };
-
- var backgroundImage = {
- name: 'background-image',
- initialValue: 'none',
- type: 1 /* LIST */,
- prefix: false,
- parse: function (context, tokens) {
- if (tokens.length === 0) {
- return [];
- }
- var first = tokens[0];
- if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') {
- return [];
- }
- return tokens
- .filter(function (value) { return nonFunctionArgSeparator(value) && isSupportedImage(value); })
- .map(function (value) { return image.parse(context, value); });
- }
- };
-
- var backgroundOrigin = {
- name: 'background-origin',
- initialValue: 'border-box',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return tokens.map(function (token) {
- if (isIdentToken(token)) {
- switch (token.value) {
- case 'padding-box':
- return 1 /* PADDING_BOX */;
- case 'content-box':
- return 2 /* CONTENT_BOX */;
- }
- }
- return 0 /* BORDER_BOX */;
- });
- }
- };
-
- var backgroundPosition = {
- name: 'background-position',
- initialValue: '0% 0%',
- type: 1 /* LIST */,
- prefix: false,
- parse: function (_context, tokens) {
- return parseFunctionArgs(tokens)
- .map(function (values) { return values.filter(isLengthPercentage); })
- .map(parseLengthPercentageTuple);
- }
- };
-
- var backgroundRepeat = {
- name: 'background-repeat',
- initialValue: 'repeat',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return parseFunctionArgs(tokens)
- .map(function (values) {
- return values
- .filter(isIdentToken)
- .map(function (token) { return token.value; })
- .join(' ');
- })
- .map(parseBackgroundRepeat);
- }
- };
- var parseBackgroundRepeat = function (value) {
- switch (value) {
- case 'no-repeat':
- return 1 /* NO_REPEAT */;
- case 'repeat-x':
- case 'repeat no-repeat':
- return 2 /* REPEAT_X */;
- case 'repeat-y':
- case 'no-repeat repeat':
- return 3 /* REPEAT_Y */;
- case 'repeat':
- default:
- return 0 /* REPEAT */;
- }
- };
-
- var BACKGROUND_SIZE;
- (function (BACKGROUND_SIZE) {
- BACKGROUND_SIZE["AUTO"] = "auto";
- BACKGROUND_SIZE["CONTAIN"] = "contain";
- BACKGROUND_SIZE["COVER"] = "cover";
- })(BACKGROUND_SIZE || (BACKGROUND_SIZE = {}));
- var backgroundSize = {
- name: 'background-size',
- initialValue: '0',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return parseFunctionArgs(tokens).map(function (values) { return values.filter(isBackgroundSizeInfoToken); });
- }
- };
- var isBackgroundSizeInfoToken = function (value) {
- return isIdentToken(value) || isLengthPercentage(value);
- };
-
- var borderColorForSide = function (side) { return ({
- name: "border-" + side + "-color",
- initialValue: 'transparent',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'color'
- }); };
- var borderTopColor = borderColorForSide('top');
- var borderRightColor = borderColorForSide('right');
- var borderBottomColor = borderColorForSide('bottom');
- var borderLeftColor = borderColorForSide('left');
-
- var borderRadiusForSide = function (side) { return ({
- name: "border-radius-" + side,
- initialValue: '0 0',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return parseLengthPercentageTuple(tokens.filter(isLengthPercentage));
- }
- }); };
- var borderTopLeftRadius = borderRadiusForSide('top-left');
- var borderTopRightRadius = borderRadiusForSide('top-right');
- var borderBottomRightRadius = borderRadiusForSide('bottom-right');
- var borderBottomLeftRadius = borderRadiusForSide('bottom-left');
-
- var borderStyleForSide = function (side) { return ({
- name: "border-" + side + "-style",
- initialValue: 'solid',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, style) {
- switch (style) {
- case 'none':
- return 0 /* NONE */;
- case 'dashed':
- return 2 /* DASHED */;
- case 'dotted':
- return 3 /* DOTTED */;
- case 'double':
- return 4 /* DOUBLE */;
- }
- return 1 /* SOLID */;
- }
- }); };
- var borderTopStyle = borderStyleForSide('top');
- var borderRightStyle = borderStyleForSide('right');
- var borderBottomStyle = borderStyleForSide('bottom');
- var borderLeftStyle = borderStyleForSide('left');
-
- var borderWidthForSide = function (side) { return ({
- name: "border-" + side + "-width",
- initialValue: '0',
- type: 0 /* VALUE */,
- prefix: false,
- parse: function (_context, token) {
- if (isDimensionToken(token)) {
- return token.number;
- }
- return 0;
- }
- }); };
- var borderTopWidth = borderWidthForSide('top');
- var borderRightWidth = borderWidthForSide('right');
- var borderBottomWidth = borderWidthForSide('bottom');
- var borderLeftWidth = borderWidthForSide('left');
-
- var color = {
- name: "color",
- initialValue: 'transparent',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'color'
- };
-
- var direction = {
- name: 'direction',
- initialValue: 'ltr',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, direction) {
- switch (direction) {
- case 'rtl':
- return 1 /* RTL */;
- case 'ltr':
- default:
- return 0 /* LTR */;
- }
- }
- };
-
- var display = {
- name: 'display',
- initialValue: 'inline-block',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return tokens.filter(isIdentToken).reduce(function (bit, token) {
- return bit | parseDisplayValue(token.value);
- }, 0 /* NONE */);
- }
- };
- var parseDisplayValue = function (display) {
- switch (display) {
- case 'block':
- case '-webkit-box':
- return 2 /* BLOCK */;
- case 'inline':
- return 4 /* INLINE */;
- case 'run-in':
- return 8 /* RUN_IN */;
- case 'flow':
- return 16 /* FLOW */;
- case 'flow-root':
- return 32 /* FLOW_ROOT */;
- case 'table':
- return 64 /* TABLE */;
- case 'flex':
- case '-webkit-flex':
- return 128 /* FLEX */;
- case 'grid':
- case '-ms-grid':
- return 256 /* GRID */;
- case 'ruby':
- return 512 /* RUBY */;
- case 'subgrid':
- return 1024 /* SUBGRID */;
- case 'list-item':
- return 2048 /* LIST_ITEM */;
- case 'table-row-group':
- return 4096 /* TABLE_ROW_GROUP */;
- case 'table-header-group':
- return 8192 /* TABLE_HEADER_GROUP */;
- case 'table-footer-group':
- return 16384 /* TABLE_FOOTER_GROUP */;
- case 'table-row':
- return 32768 /* TABLE_ROW */;
- case 'table-cell':
- return 65536 /* TABLE_CELL */;
- case 'table-column-group':
- return 131072 /* TABLE_COLUMN_GROUP */;
- case 'table-column':
- return 262144 /* TABLE_COLUMN */;
- case 'table-caption':
- return 524288 /* TABLE_CAPTION */;
- case 'ruby-base':
- return 1048576 /* RUBY_BASE */;
- case 'ruby-text':
- return 2097152 /* RUBY_TEXT */;
- case 'ruby-base-container':
- return 4194304 /* RUBY_BASE_CONTAINER */;
- case 'ruby-text-container':
- return 8388608 /* RUBY_TEXT_CONTAINER */;
- case 'contents':
- return 16777216 /* CONTENTS */;
- case 'inline-block':
- return 33554432 /* INLINE_BLOCK */;
- case 'inline-list-item':
- return 67108864 /* INLINE_LIST_ITEM */;
- case 'inline-table':
- return 134217728 /* INLINE_TABLE */;
- case 'inline-flex':
- return 268435456 /* INLINE_FLEX */;
- case 'inline-grid':
- return 536870912 /* INLINE_GRID */;
- }
- return 0 /* NONE */;
- };
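- // Display is stored as a bit field, so multi-keyword values combine with
- // bitwise OR: e.g. the tokens of 'inline flex' reduce to INLINE | FLEX
- // (4 | 128 = 132), and contains() further below can test individual flags.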
-
- var float = {
- name: 'float',
- initialValue: 'none',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, float) {
- switch (float) {
- case 'left':
- return 1 /* LEFT */;
- case 'right':
- return 2 /* RIGHT */;
- case 'inline-start':
- return 3 /* INLINE_START */;
- case 'inline-end':
- return 4 /* INLINE_END */;
- }
- return 0 /* NONE */;
- }
- };
-
- var letterSpacing = {
- name: 'letter-spacing',
- initialValue: '0',
- prefix: false,
- type: 0 /* VALUE */,
- parse: function (_context, token) {
- if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'normal') {
- return 0;
- }
- if (token.type === 17 /* NUMBER_TOKEN */) {
- return token.number;
- }
- if (token.type === 15 /* DIMENSION_TOKEN */) {
- return token.number;
- }
- return 0;
- }
- };
-
- var LINE_BREAK;
- (function (LINE_BREAK) {
- LINE_BREAK["NORMAL"] = "normal";
- LINE_BREAK["STRICT"] = "strict";
- })(LINE_BREAK || (LINE_BREAK = {}));
- var lineBreak = {
- name: 'line-break',
- initialValue: 'normal',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, lineBreak) {
- switch (lineBreak) {
- case 'strict':
- return LINE_BREAK.STRICT;
- case 'normal':
- default:
- return LINE_BREAK.NORMAL;
- }
- }
- };
-
- var lineHeight = {
- name: 'line-height',
- initialValue: 'normal',
- prefix: false,
- type: 4 /* TOKEN_VALUE */
- };
- var computeLineHeight = function (token, fontSize) {
- if (isIdentToken(token) && token.value === 'normal') {
- return 1.2 * fontSize;
- }
- else if (token.type === 17 /* NUMBER_TOKEN */) {
- return fontSize * token.number;
- }
- else if (isLengthPercentage(token)) {
- return getAbsoluteValue(token, fontSize);
- }
- return fontSize;
- };
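- // Line-height resolution: 'normal' becomes 1.2 * fontSize, a bare number is a
- // multiplier (e.g. 1.5 with a 16px font-size gives 24), and lengths or
- // percentages are resolved against the font size via getAbsoluteValue.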
-
- var listStyleImage = {
- name: 'list-style-image',
- initialValue: 'none',
- type: 0 /* VALUE */,
- prefix: false,
- parse: function (context, token) {
- if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') {
- return null;
- }
- return image.parse(context, token);
- }
- };
-
- var listStylePosition = {
- name: 'list-style-position',
- initialValue: 'outside',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, position) {
- switch (position) {
- case 'inside':
- return 0 /* INSIDE */;
- case 'outside':
- default:
- return 1 /* OUTSIDE */;
- }
- }
- };
-
- var listStyleType = {
- name: 'list-style-type',
- initialValue: 'none',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, type) {
- switch (type) {
- case 'disc':
- return 0 /* DISC */;
- case 'circle':
- return 1 /* CIRCLE */;
- case 'square':
- return 2 /* SQUARE */;
- case 'decimal':
- return 3 /* DECIMAL */;
- case 'cjk-decimal':
- return 4 /* CJK_DECIMAL */;
- case 'decimal-leading-zero':
- return 5 /* DECIMAL_LEADING_ZERO */;
- case 'lower-roman':
- return 6 /* LOWER_ROMAN */;
- case 'upper-roman':
- return 7 /* UPPER_ROMAN */;
- case 'lower-greek':
- return 8 /* LOWER_GREEK */;
- case 'lower-alpha':
- return 9 /* LOWER_ALPHA */;
- case 'upper-alpha':
- return 10 /* UPPER_ALPHA */;
- case 'arabic-indic':
- return 11 /* ARABIC_INDIC */;
- case 'armenian':
- return 12 /* ARMENIAN */;
- case 'bengali':
- return 13 /* BENGALI */;
- case 'cambodian':
- return 14 /* CAMBODIAN */;
- case 'cjk-earthly-branch':
- return 15 /* CJK_EARTHLY_BRANCH */;
- case 'cjk-heavenly-stem':
- return 16 /* CJK_HEAVENLY_STEM */;
- case 'cjk-ideographic':
- return 17 /* CJK_IDEOGRAPHIC */;
- case 'devanagari':
- return 18 /* DEVANAGARI */;
- case 'ethiopic-numeric':
- return 19 /* ETHIOPIC_NUMERIC */;
- case 'georgian':
- return 20 /* GEORGIAN */;
- case 'gujarati':
- return 21 /* GUJARATI */;
- case 'gurmukhi':
- return 22 /* GURMUKHI */;
- case 'hebrew':
- return 22 /* HEBREW */;
- case 'hiragana':
- return 23 /* HIRAGANA */;
- case 'hiragana-iroha':
- return 24 /* HIRAGANA_IROHA */;
- case 'japanese-formal':
- return 25 /* JAPANESE_FORMAL */;
- case 'japanese-informal':
- return 26 /* JAPANESE_INFORMAL */;
- case 'kannada':
- return 27 /* KANNADA */;
- case 'katakana':
- return 28 /* KATAKANA */;
- case 'katakana-iroha':
- return 29 /* KATAKANA_IROHA */;
- case 'khmer':
- return 30 /* KHMER */;
- case 'korean-hangul-formal':
- return 31 /* KOREAN_HANGUL_FORMAL */;
- case 'korean-hanja-formal':
- return 32 /* KOREAN_HANJA_FORMAL */;
- case 'korean-hanja-informal':
- return 33 /* KOREAN_HANJA_INFORMAL */;
- case 'lao':
- return 34 /* LAO */;
- case 'lower-armenian':
- return 35 /* LOWER_ARMENIAN */;
- case 'malayalam':
- return 36 /* MALAYALAM */;
- case 'mongolian':
- return 37 /* MONGOLIAN */;
- case 'myanmar':
- return 38 /* MYANMAR */;
- case 'oriya':
- return 39 /* ORIYA */;
- case 'persian':
- return 40 /* PERSIAN */;
- case 'simp-chinese-formal':
- return 41 /* SIMP_CHINESE_FORMAL */;
- case 'simp-chinese-informal':
- return 42 /* SIMP_CHINESE_INFORMAL */;
- case 'tamil':
- return 43 /* TAMIL */;
- case 'telugu':
- return 44 /* TELUGU */;
- case 'thai':
- return 45 /* THAI */;
- case 'tibetan':
- return 46 /* TIBETAN */;
- case 'trad-chinese-formal':
- return 47 /* TRAD_CHINESE_FORMAL */;
- case 'trad-chinese-informal':
- return 48 /* TRAD_CHINESE_INFORMAL */;
- case 'upper-armenian':
- return 49 /* UPPER_ARMENIAN */;
- case 'disclosure-open':
- return 50 /* DISCLOSURE_OPEN */;
- case 'disclosure-closed':
- return 51 /* DISCLOSURE_CLOSED */;
- case 'none':
- default:
- return -1 /* NONE */;
- }
- }
- };
-
- var marginForSide = function (side) { return ({
- name: "margin-" + side,
- initialValue: '0',
- prefix: false,
- type: 4 /* TOKEN_VALUE */
- }); };
- var marginTop = marginForSide('top');
- var marginRight = marginForSide('right');
- var marginBottom = marginForSide('bottom');
- var marginLeft = marginForSide('left');
-
- var overflow = {
- name: 'overflow',
- initialValue: 'visible',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return tokens.filter(isIdentToken).map(function (overflow) {
- switch (overflow.value) {
- case 'hidden':
- return 1 /* HIDDEN */;
- case 'scroll':
- return 2 /* SCROLL */;
- case 'clip':
- return 3 /* CLIP */;
- case 'auto':
- return 4 /* AUTO */;
- case 'visible':
- default:
- return 0 /* VISIBLE */;
- }
- });
- }
- };
-
- var overflowWrap = {
- name: 'overflow-wrap',
- initialValue: 'normal',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, overflow) {
- switch (overflow) {
- case 'break-word':
- return "break-word" /* BREAK_WORD */;
- case 'normal':
- default:
- return "normal" /* NORMAL */;
- }
- }
- };
-
- var paddingForSide = function (side) { return ({
- name: "padding-" + side,
- initialValue: '0',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'length-percentage'
- }); };
- var paddingTop = paddingForSide('top');
- var paddingRight = paddingForSide('right');
- var paddingBottom = paddingForSide('bottom');
- var paddingLeft = paddingForSide('left');
-
- var textAlign = {
- name: 'text-align',
- initialValue: 'left',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, textAlign) {
- switch (textAlign) {
- case 'right':
- return 2 /* RIGHT */;
- case 'center':
- case 'justify':
- return 1 /* CENTER */;
- case 'left':
- default:
- return 0 /* LEFT */;
- }
- }
- };
-
- var position = {
- name: 'position',
- initialValue: 'static',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, position) {
- switch (position) {
- case 'relative':
- return 1 /* RELATIVE */;
- case 'absolute':
- return 2 /* ABSOLUTE */;
- case 'fixed':
- return 3 /* FIXED */;
- case 'sticky':
- return 4 /* STICKY */;
- }
- return 0 /* STATIC */;
- }
- };
-
- var textShadow = {
- name: 'text-shadow',
- initialValue: 'none',
- type: 1 /* LIST */,
- prefix: false,
- parse: function (context, tokens) {
- if (tokens.length === 1 && isIdentWithValue(tokens[0], 'none')) {
- return [];
- }
- return parseFunctionArgs(tokens).map(function (values) {
- var shadow = {
- color: COLORS.TRANSPARENT,
- offsetX: ZERO_LENGTH,
- offsetY: ZERO_LENGTH,
- blur: ZERO_LENGTH
- };
- var c = 0;
- for (var i = 0; i < values.length; i++) {
- var token = values[i];
- if (isLength(token)) {
- if (c === 0) {
- shadow.offsetX = token;
- }
- else if (c === 1) {
- shadow.offsetY = token;
- }
- else {
- shadow.blur = token;
- }
- c++;
- }
- else {
- shadow.color = color$1.parse(context, token);
- }
- }
- return shadow;
- });
- }
- };
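- // Each comma-separated shadow from parseFunctionArgs is read positionally:
- // the first two length tokens are the X/Y offsets, a third length is the blur
- // radius, and any non-length token (e.g. 'red' in '2px 2px 4px red') is parsed
- // as the shadow color.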
-
- var textTransform = {
- name: 'text-transform',
- initialValue: 'none',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, textTransform) {
- switch (textTransform) {
- case 'uppercase':
- return 2 /* UPPERCASE */;
- case 'lowercase':
- return 1 /* LOWERCASE */;
- case 'capitalize':
- return 3 /* CAPITALIZE */;
- }
- return 0 /* NONE */;
- }
- };
-
- var transform$1 = {
- name: 'transform',
- initialValue: 'none',
- prefix: true,
- type: 0 /* VALUE */,
- parse: function (_context, token) {
- if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') {
- return null;
- }
- if (token.type === 18 /* FUNCTION */) {
- var transformFunction = SUPPORTED_TRANSFORM_FUNCTIONS[token.name];
- if (typeof transformFunction === 'undefined') {
- throw new Error("Attempting to parse an unsupported transform function \"" + token.name + "\"");
- }
- return transformFunction(token.values);
- }
- return null;
- }
- };
- var matrix = function (args) {
- var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; });
- return values.length === 6 ? values : null;
- };
- // doesn't support 3D transforms at the moment
- var matrix3d = function (args) {
- var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; });
- var a1 = values[0], b1 = values[1], a2 = values[4], b2 = values[5], a4 = values[12], b4 = values[13];
- return values.length === 16 ? [a1, b1, a2, b2, a4, b4] : null;
- };
- var SUPPORTED_TRANSFORM_FUNCTIONS = {
- matrix: matrix,
- matrix3d: matrix3d
- };
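- // Only matrix() and matrix3d() are handled; any other transform function makes
- // transform$1.parse above throw. matrix3d() keeps just the 2D-affine
- // components [a1, b1, a2, b2, a4, b4] of the 4x4 matrix.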
-
- var DEFAULT_VALUE = {
- type: 16 /* PERCENTAGE_TOKEN */,
- number: 50,
- flags: FLAG_INTEGER
- };
- var DEFAULT = [DEFAULT_VALUE, DEFAULT_VALUE];
- var transformOrigin = {
- name: 'transform-origin',
- initialValue: '50% 50%',
- prefix: true,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- var origins = tokens.filter(isLengthPercentage);
- if (origins.length !== 2) {
- return DEFAULT;
- }
- return [origins[0], origins[1]];
- }
- };
-
- var visibility = {
- name: 'visible',
- initialValue: 'none',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, visibility) {
- switch (visibility) {
- case 'hidden':
- return 1 /* HIDDEN */;
- case 'collapse':
- return 2 /* COLLAPSE */;
- case 'visible':
- default:
- return 0 /* VISIBLE */;
- }
- }
- };
-
- var WORD_BREAK;
- (function (WORD_BREAK) {
- WORD_BREAK["NORMAL"] = "normal";
- WORD_BREAK["BREAK_ALL"] = "break-all";
- WORD_BREAK["KEEP_ALL"] = "keep-all";
- })(WORD_BREAK || (WORD_BREAK = {}));
- var wordBreak = {
- name: 'word-break',
- initialValue: 'normal',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, wordBreak) {
- switch (wordBreak) {
- case 'break-all':
- return WORD_BREAK.BREAK_ALL;
- case 'keep-all':
- return WORD_BREAK.KEEP_ALL;
- case 'normal':
- default:
- return WORD_BREAK.NORMAL;
- }
- }
- };
-
- var zIndex = {
- name: 'z-index',
- initialValue: 'auto',
- prefix: false,
- type: 0 /* VALUE */,
- parse: function (_context, token) {
- if (token.type === 20 /* IDENT_TOKEN */) {
- return { auto: true, order: 0 };
- }
- if (isNumberToken(token)) {
- return { auto: false, order: token.number };
- }
- throw new Error("Invalid z-index number parsed");
- }
- };
-
- var time = {
- name: 'time',
- parse: function (_context, value) {
- if (value.type === 15 /* DIMENSION_TOKEN */) {
- switch (value.unit.toLowerCase()) {
- case 's':
- return 1000 * value.number;
- case 'ms':
- return value.number;
- }
- }
- throw new Error("Unsupported time type");
- }
- };
-
- var opacity = {
- name: 'opacity',
- initialValue: '1',
- type: 0 /* VALUE */,
- prefix: false,
- parse: function (_context, token) {
- if (isNumberToken(token)) {
- return token.number;
- }
- return 1;
- }
- };
-
- var textDecorationColor = {
- name: "text-decoration-color",
- initialValue: 'transparent',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'color'
- };
-
- var textDecorationLine = {
- name: 'text-decoration-line',
- initialValue: 'none',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- return tokens
- .filter(isIdentToken)
- .map(function (token) {
- switch (token.value) {
- case 'underline':
- return 1 /* UNDERLINE */;
- case 'overline':
- return 2 /* OVERLINE */;
- case 'line-through':
- return 3 /* LINE_THROUGH */;
- case 'none':
- return 4 /* BLINK */;
- }
- return 0 /* NONE */;
- })
- .filter(function (line) { return line !== 0 /* NONE */; });
- }
- };
-
- var fontFamily = {
- name: "font-family",
- initialValue: '',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- var accumulator = [];
- var results = [];
- tokens.forEach(function (token) {
- switch (token.type) {
- case 20 /* IDENT_TOKEN */:
- case 0 /* STRING_TOKEN */:
- accumulator.push(token.value);
- break;
- case 17 /* NUMBER_TOKEN */:
- accumulator.push(token.number.toString());
- break;
- case 4 /* COMMA_TOKEN */:
- results.push(accumulator.join(' '));
- accumulator.length = 0;
- break;
- }
- });
- if (accumulator.length) {
- results.push(accumulator.join(' '));
- }
- return results.map(function (result) { return (result.indexOf(' ') === -1 ? result : "'" + result + "'"); });
- }
- };
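- // Ident/string/number tokens accumulate into a family name until a comma
- // flushes it, so 'Helvetica Neue, Arial' yields ["'Helvetica Neue'", "Arial"];
- // names containing a space are re-quoted with single quotes.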
-
- var fontSize = {
- name: "font-size",
- initialValue: '0',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'length'
- };
-
- var fontWeight = {
- name: 'font-weight',
- initialValue: 'normal',
- type: 0 /* VALUE */,
- prefix: false,
- parse: function (_context, token) {
- if (isNumberToken(token)) {
- return token.number;
- }
- if (isIdentToken(token)) {
- switch (token.value) {
- case 'bold':
- return 700;
- case 'normal':
- default:
- return 400;
- }
- }
- return 400;
- }
- };
-
- var fontVariant = {
- name: 'font-variant',
- initialValue: 'none',
- type: 1 /* LIST */,
- prefix: false,
- parse: function (_context, tokens) {
- return tokens.filter(isIdentToken).map(function (token) { return token.value; });
- }
- };
-
- var fontStyle = {
- name: 'font-style',
- initialValue: 'normal',
- prefix: false,
- type: 2 /* IDENT_VALUE */,
- parse: function (_context, overflow) {
- switch (overflow) {
- case 'oblique':
- return "oblique" /* OBLIQUE */;
- case 'italic':
- return "italic" /* ITALIC */;
- case 'normal':
- default:
- return "normal" /* NORMAL */;
- }
- }
- };
-
- var contains = function (bit, value) { return (bit & value) !== 0; };
-
- var content = {
- name: 'content',
- initialValue: 'none',
- type: 1 /* LIST */,
- prefix: false,
- parse: function (_context, tokens) {
- if (tokens.length === 0) {
- return [];
- }
- var first = tokens[0];
- if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') {
- return [];
- }
- return tokens;
- }
- };
-
- var counterIncrement = {
- name: 'counter-increment',
- initialValue: 'none',
- prefix: true,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- if (tokens.length === 0) {
- return null;
- }
- var first = tokens[0];
- if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') {
- return null;
- }
- var increments = [];
- var filtered = tokens.filter(nonWhiteSpace);
- for (var i = 0; i < filtered.length; i++) {
- var counter = filtered[i];
- var next = filtered[i + 1];
- if (counter.type === 20 /* IDENT_TOKEN */) {
- var increment = next && isNumberToken(next) ? next.number : 1;
- increments.push({ counter: counter.value, increment: increment });
- }
- }
- return increments;
- }
- };
-
- var counterReset = {
- name: 'counter-reset',
- initialValue: 'none',
- prefix: true,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- if (tokens.length === 0) {
- return [];
- }
- var resets = [];
- var filtered = tokens.filter(nonWhiteSpace);
- for (var i = 0; i < filtered.length; i++) {
- var counter = filtered[i];
- var next = filtered[i + 1];
- if (isIdentToken(counter) && counter.value !== 'none') {
- var reset = next && isNumberToken(next) ? next.number : 0;
- resets.push({ counter: counter.value, reset: reset });
- }
- }
- return resets;
- }
- };
-
- var duration = {
- name: 'duration',
- initialValue: '0s',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (context, tokens) {
- return tokens.filter(isDimensionToken).map(function (token) { return time.parse(context, token); });
- }
- };
-
- var quotes = {
- name: 'quotes',
- initialValue: 'none',
- prefix: true,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- if (tokens.length === 0) {
- return null;
- }
- var first = tokens[0];
- if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') {
- return null;
- }
- var quotes = [];
- var filtered = tokens.filter(isStringToken);
- if (filtered.length % 2 !== 0) {
- return null;
- }
- for (var i = 0; i < filtered.length; i += 2) {
- var open_1 = filtered[i].value;
- var close_1 = filtered[i + 1].value;
- quotes.push({ open: open_1, close: close_1 });
- }
- return quotes;
- }
- };
- var getQuote = function (quotes, depth, open) {
- if (!quotes) {
- return '';
- }
- var quote = quotes[Math.min(depth, quotes.length - 1)];
- if (!quote) {
- return '';
- }
- return open ? quote.open : quote.close;
- };
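- // Quote pairs come from consecutive string tokens, so '"«" "»" "‹" "›"' yields
- // two open/close pairs; getQuote clamps the nesting depth to the last pair.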
-
- var paintOrder = {
- name: 'paint-order',
- initialValue: 'normal',
- prefix: false,
- type: 1 /* LIST */,
- parse: function (_context, tokens) {
- var DEFAULT_VALUE = [0 /* FILL */, 1 /* STROKE */, 2 /* MARKERS */];
- var layers = [];
- tokens.filter(isIdentToken).forEach(function (token) {
- switch (token.value) {
- case 'stroke':
- layers.push(1 /* STROKE */);
- break;
- case 'fill':
- layers.push(0 /* FILL */);
- break;
- case 'markers':
- layers.push(2 /* MARKERS */);
- break;
- }
- });
- DEFAULT_VALUE.forEach(function (value) {
- if (layers.indexOf(value) === -1) {
- layers.push(value);
- }
- });
- return layers;
- }
- };
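- // Layers named in the value come first and any of fill/stroke/markers that
- // were not mentioned are appended in default order, so 'stroke' resolves to
- // [STROKE, FILL, MARKERS].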
-
- var webkitTextStrokeColor = {
- name: "-webkit-text-stroke-color",
- initialValue: 'currentcolor',
- prefix: false,
- type: 3 /* TYPE_VALUE */,
- format: 'color'
- };
-
- var webkitTextStrokeWidth = {
- name: "-webkit-text-stroke-width",
- initialValue: '0',
- type: 0 /* VALUE */,
- prefix: false,
- parse: function (_context, token) {
- if (isDimensionToken(token)) {
- return token.number;
- }
- return 0;
- }
- };
-
- var CSSParsedDeclaration = /** @class */ (function () {
- function CSSParsedDeclaration(context, declaration) {
- var _a, _b;
- this.animationDuration = parse(context, duration, declaration.animationDuration);
- this.backgroundClip = parse(context, backgroundClip, declaration.backgroundClip);
- this.backgroundColor = parse(context, backgroundColor, declaration.backgroundColor);
- this.backgroundImage = parse(context, backgroundImage, declaration.backgroundImage);
- this.backgroundOrigin = parse(context, backgroundOrigin, declaration.backgroundOrigin);
- this.backgroundPosition = parse(context, backgroundPosition, declaration.backgroundPosition);
- this.backgroundRepeat = parse(context, backgroundRepeat, declaration.backgroundRepeat);
- this.backgroundSize = parse(context, backgroundSize, declaration.backgroundSize);
- this.borderTopColor = parse(context, borderTopColor, declaration.borderTopColor);
- this.borderRightColor = parse(context, borderRightColor, declaration.borderRightColor);
- this.borderBottomColor = parse(context, borderBottomColor, declaration.borderBottomColor);
- this.borderLeftColor = parse(context, borderLeftColor, declaration.borderLeftColor);
- this.borderTopLeftRadius = parse(context, borderTopLeftRadius, declaration.borderTopLeftRadius);
- this.borderTopRightRadius = parse(context, borderTopRightRadius, declaration.borderTopRightRadius);
- this.borderBottomRightRadius = parse(context, borderBottomRightRadius, declaration.borderBottomRightRadius);
- this.borderBottomLeftRadius = parse(context, borderBottomLeftRadius, declaration.borderBottomLeftRadius);
- this.borderTopStyle = parse(context, borderTopStyle, declaration.borderTopStyle);
- this.borderRightStyle = parse(context, borderRightStyle, declaration.borderRightStyle);
- this.borderBottomStyle = parse(context, borderBottomStyle, declaration.borderBottomStyle);
- this.borderLeftStyle = parse(context, borderLeftStyle, declaration.borderLeftStyle);
- this.borderTopWidth = parse(context, borderTopWidth, declaration.borderTopWidth);
- this.borderRightWidth = parse(context, borderRightWidth, declaration.borderRightWidth);
- this.borderBottomWidth = parse(context, borderBottomWidth, declaration.borderBottomWidth);
- this.borderLeftWidth = parse(context, borderLeftWidth, declaration.borderLeftWidth);
- this.color = parse(context, color, declaration.color);
- this.direction = parse(context, direction, declaration.direction);
- this.display = parse(context, display, declaration.display);
- this.float = parse(context, float, declaration.cssFloat);
- this.fontFamily = parse(context, fontFamily, declaration.fontFamily);
- this.fontSize = parse(context, fontSize, declaration.fontSize);
- this.fontStyle = parse(context, fontStyle, declaration.fontStyle);
- this.fontVariant = parse(context, fontVariant, declaration.fontVariant);
- this.fontWeight = parse(context, fontWeight, declaration.fontWeight);
- this.letterSpacing = parse(context, letterSpacing, declaration.letterSpacing);
- this.lineBreak = parse(context, lineBreak, declaration.lineBreak);
- this.lineHeight = parse(context, lineHeight, declaration.lineHeight);
- this.listStyleImage = parse(context, listStyleImage, declaration.listStyleImage);
- this.listStylePosition = parse(context, listStylePosition, declaration.listStylePosition);
- this.listStyleType = parse(context, listStyleType, declaration.listStyleType);
- this.marginTop = parse(context, marginTop, declaration.marginTop);
- this.marginRight = parse(context, marginRight, declaration.marginRight);
- this.marginBottom = parse(context, marginBottom, declaration.marginBottom);
- this.marginLeft = parse(context, marginLeft, declaration.marginLeft);
- this.opacity = parse(context, opacity, declaration.opacity);
- var overflowTuple = parse(context, overflow, declaration.overflow);
- this.overflowX = overflowTuple[0];
- this.overflowY = overflowTuple[overflowTuple.length > 1 ? 1 : 0];
- this.overflowWrap = parse(context, overflowWrap, declaration.overflowWrap);
- this.paddingTop = parse(context, paddingTop, declaration.paddingTop);
- this.paddingRight = parse(context, paddingRight, declaration.paddingRight);
- this.paddingBottom = parse(context, paddingBottom, declaration.paddingBottom);
- this.paddingLeft = parse(context, paddingLeft, declaration.paddingLeft);
- this.paintOrder = parse(context, paintOrder, declaration.paintOrder);
- this.position = parse(context, position, declaration.position);
- this.textAlign = parse(context, textAlign, declaration.textAlign);
- this.textDecorationColor = parse(context, textDecorationColor, (_a = declaration.textDecorationColor) !== null && _a !== void 0 ? _a : declaration.color);
- this.textDecorationLine = parse(context, textDecorationLine, (_b = declaration.textDecorationLine) !== null && _b !== void 0 ? _b : declaration.textDecoration);
- this.textShadow = parse(context, textShadow, declaration.textShadow);
- this.textTransform = parse(context, textTransform, declaration.textTransform);
- this.transform = parse(context, transform$1, declaration.transform);
- this.transformOrigin = parse(context, transformOrigin, declaration.transformOrigin);
- this.visibility = parse(context, visibility, declaration.visibility);
- this.webkitTextStrokeColor = parse(context, webkitTextStrokeColor, declaration.webkitTextStrokeColor);
- this.webkitTextStrokeWidth = parse(context, webkitTextStrokeWidth, declaration.webkitTextStrokeWidth);
- this.wordBreak = parse(context, wordBreak, declaration.wordBreak);
- this.zIndex = parse(context, zIndex, declaration.zIndex);
- }
- CSSParsedDeclaration.prototype.isVisible = function () {
- return this.display > 0 && this.opacity > 0 && this.visibility === 0 /* VISIBLE */;
- };
- CSSParsedDeclaration.prototype.isTransparent = function () {
- return isTransparent(this.backgroundColor);
- };
- CSSParsedDeclaration.prototype.isTransformed = function () {
- return this.transform !== null;
- };
- CSSParsedDeclaration.prototype.isPositioned = function () {
- return this.position !== 0 /* STATIC */;
- };
- CSSParsedDeclaration.prototype.isPositionedWithZIndex = function () {
- return this.isPositioned() && !this.zIndex.auto;
- };
- CSSParsedDeclaration.prototype.isFloating = function () {
- return this.float !== 0 /* NONE */;
- };
- CSSParsedDeclaration.prototype.isInlineLevel = function () {
- return (contains(this.display, 4 /* INLINE */) ||
- contains(this.display, 33554432 /* INLINE_BLOCK */) ||
- contains(this.display, 268435456 /* INLINE_FLEX */) ||
- contains(this.display, 536870912 /* INLINE_GRID */) ||
- contains(this.display, 67108864 /* INLINE_LIST_ITEM */) ||
- contains(this.display, 134217728 /* INLINE_TABLE */));
- };
- return CSSParsedDeclaration;
- }());
- var CSSParsedPseudoDeclaration = /** @class */ (function () {
- function CSSParsedPseudoDeclaration(context, declaration) {
- this.content = parse(context, content, declaration.content);
- this.quotes = parse(context, quotes, declaration.quotes);
- }
- return CSSParsedPseudoDeclaration;
- }());
- var CSSParsedCounterDeclaration = /** @class */ (function () {
- function CSSParsedCounterDeclaration(context, declaration) {
- this.counterIncrement = parse(context, counterIncrement, declaration.counterIncrement);
- this.counterReset = parse(context, counterReset, declaration.counterReset);
- }
- return CSSParsedCounterDeclaration;
- }());
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- var parse = function (context, descriptor, style) {
- var tokenizer = new Tokenizer();
- var value = style !== null && typeof style !== 'undefined' ? style.toString() : descriptor.initialValue;
- tokenizer.write(value);
- var parser = new Parser(tokenizer.read());
- switch (descriptor.type) {
- case 2 /* IDENT_VALUE */:
- var token = parser.parseComponentValue();
- return descriptor.parse(context, isIdentToken(token) ? token.value : descriptor.initialValue);
- case 0 /* VALUE */:
- return descriptor.parse(context, parser.parseComponentValue());
- case 1 /* LIST */:
- return descriptor.parse(context, parser.parseComponentValues());
- case 4 /* TOKEN_VALUE */:
- return parser.parseComponentValue();
- case 3 /* TYPE_VALUE */:
- switch (descriptor.format) {
- case 'angle':
- return angle.parse(context, parser.parseComponentValue());
- case 'color':
- return color$1.parse(context, parser.parseComponentValue());
- case 'image':
- return image.parse(context, parser.parseComponentValue());
- case 'length':
- var length_1 = parser.parseComponentValue();
- return isLength(length_1) ? length_1 : ZERO_LENGTH;
- case 'length-percentage':
- var value_1 = parser.parseComponentValue();
- return isLengthPercentage(value_1) ? value_1 : ZERO_LENGTH;
- case 'time':
- return time.parse(context, parser.parseComponentValue());
- }
- break;
- }
- };
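- // parse() tokenizes the raw declaration value (falling back to the
- // descriptor's initialValue when the style is null/undefined) and dispatches
- // on the descriptor type: single token, token list, ident keyword, raw token,
- // or a typed value (angle/color/image/length/length-percentage/time).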
-
- var elementDebuggerAttribute = 'data-html2canvas-debug';
- var getElementDebugType = function (element) {
- var attribute = element.getAttribute(elementDebuggerAttribute);
- switch (attribute) {
- case 'all':
- return 1 /* ALL */;
- case 'clone':
- return 2 /* CLONE */;
- case 'parse':
- return 3 /* PARSE */;
- case 'render':
- return 4 /* RENDER */;
- default:
- return 0 /* NONE */;
- }
- };
- var isDebugging = function (element, type) {
- var elementType = getElementDebugType(element);
- return elementType === 1 /* ALL */ || type === elementType;
- };
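- // Elements opt into debugging with data-html2canvas-debug="all" | "clone" |
- // "parse" | "render": e.g. "parse" hits the debugger statement in
- // ElementContainer below, "render" sets the DEBUG_RENDER flag, and "all"
- // matches every debug type.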
-
- var ElementContainer = /** @class */ (function () {
- function ElementContainer(context, element) {
- this.context = context;
- this.textNodes = [];
- this.elements = [];
- this.flags = 0;
- if (isDebugging(element, 3 /* PARSE */)) {
- debugger;
- }
- this.styles = new CSSParsedDeclaration(context, window.getComputedStyle(element, null));
- if (isHTMLElementNode(element)) {
- if (this.styles.animationDuration.some(function (duration) { return duration > 0; })) {
- element.style.animationDuration = '0s';
- }
- if (this.styles.transform !== null) {
- // getBoundingClientRect takes transforms into account
- element.style.transform = 'none';
- }
- }
- this.bounds = parseBounds(this.context, element);
- if (isDebugging(element, 4 /* RENDER */)) {
- this.flags |= 16 /* DEBUG_RENDER */;
- }
- }
- return ElementContainer;
- }());
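- // ElementContainer snapshots the element's computed style, zeroes any running
- // animation durations and strips transforms (so getBoundingClientRect reports
- // the untransformed box), then records the element's bounds and debug flags.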
-
- /*
- * text-segmentation 1.0.3
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var base64 = 'AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAA
IAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEAAjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAF
oECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcABQAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAA
AAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHAAcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAU
ABQAFAAUABQAFAAUABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAUABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA=';
-
- /*
- * utrie 1.0.2
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var chars$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$1 = 0; i$1 < chars$1.length; i$1++) {
- lookup$1[chars$1.charCodeAt(i$1)] = i$1;
- }
- var decode = function (base64) {
- var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4;
- if (base64[base64.length - 1] === '=') {
- bufferLength--;
- if (base64[base64.length - 2] === '=') {
- bufferLength--;
- }
- }
- var buffer = typeof ArrayBuffer !== 'undefined' &&
- typeof Uint8Array !== 'undefined' &&
- typeof Uint8Array.prototype.slice !== 'undefined'
- ? new ArrayBuffer(bufferLength)
- : new Array(bufferLength);
- var bytes = Array.isArray(buffer) ? buffer : new Uint8Array(buffer);
- for (i = 0; i < len; i += 4) {
- encoded1 = lookup$1[base64.charCodeAt(i)];
- encoded2 = lookup$1[base64.charCodeAt(i + 1)];
- encoded3 = lookup$1[base64.charCodeAt(i + 2)];
- encoded4 = lookup$1[base64.charCodeAt(i + 3)];
- bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);
- bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);
- bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);
- }
- return buffer;
- };
- var polyUint16Array = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 2) {
- bytes.push((buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
- var polyUint32Array = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 4) {
- bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
-
- /** Shift size for getting the index-2 table offset. */
- var UTRIE2_SHIFT_2 = 5;
- /** Shift size for getting the index-1 table offset. */
- var UTRIE2_SHIFT_1 = 6 + 5;
- /**
- * Shift size for shifting left the index array values.
- * Increases possible data size with 16-bit index values at the cost
- * of compactability.
- * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY.
- */
- var UTRIE2_INDEX_SHIFT = 2;
- /**
- * Difference between the two shift sizes,
- * for getting an index-1 offset from an index-2 offset. 6=11-5
- */
- var UTRIE2_SHIFT_1_2 = UTRIE2_SHIFT_1 - UTRIE2_SHIFT_2;
- /**
- * The part of the index-2 table for U+D800..U+DBFF stores values for
- * lead surrogate code _units_ not code _points_.
- * Values for lead surrogate code _points_ are indexed with this portion of the table.
- * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.)
- */
- var UTRIE2_LSCP_INDEX_2_OFFSET = 0x10000 >> UTRIE2_SHIFT_2;
- /** Number of entries in a data block. 32=0x20 */
- var UTRIE2_DATA_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_2;
- /** Mask for getting the lower bits for the in-data-block offset. */
- var UTRIE2_DATA_MASK = UTRIE2_DATA_BLOCK_LENGTH - 1;
- var UTRIE2_LSCP_INDEX_2_LENGTH = 0x400 >> UTRIE2_SHIFT_2;
- /** Count the lengths of both BMP pieces. 2080=0x820 */
- var UTRIE2_INDEX_2_BMP_LENGTH = UTRIE2_LSCP_INDEX_2_OFFSET + UTRIE2_LSCP_INDEX_2_LENGTH;
- /**
- * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
- * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2.
- */
- var UTRIE2_UTF8_2B_INDEX_2_OFFSET = UTRIE2_INDEX_2_BMP_LENGTH;
- var UTRIE2_UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */
- /**
- * The index-1 table, only used for supplementary code points, at offset 2112=0x840.
- * Variable length, for code points up to highStart, where the last single-value range starts.
- * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1.
- * (For 0x100000 supplementary code points U+10000..U+10ffff.)
- *
- * The part of the index-2 table for supplementary code points starts
- * after this index-1 table.
- *
- * Both the index-1 table and the following part of the index-2 table
- * are omitted completely if there is only BMP data.
- */
- var UTRIE2_INDEX_1_OFFSET = UTRIE2_UTF8_2B_INDEX_2_OFFSET + UTRIE2_UTF8_2B_INDEX_2_LENGTH;
- /**
- * Number of index-1 entries for the BMP. 32=0x20
- * This part of the index-1 table is omitted from the serialized form.
- */
- var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> UTRIE2_SHIFT_1;
- /** Number of entries in an index-2 block. 64=0x40 */
- var UTRIE2_INDEX_2_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_1_2;
- /** Mask for getting the lower bits for the in-index-2-block offset. */
- var UTRIE2_INDEX_2_MASK = UTRIE2_INDEX_2_BLOCK_LENGTH - 1;
- var slice16 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint16Array(Array.prototype.slice.call(view, start, end));
- };
- var slice32 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint32Array(Array.prototype.slice.call(view, start, end));
- };
- var createTrieFromBase64 = function (base64, _byteLength) {
- var buffer = decode(base64);
- var view32 = Array.isArray(buffer) ? polyUint32Array(buffer) : new Uint32Array(buffer);
- var view16 = Array.isArray(buffer) ? polyUint16Array(buffer) : new Uint16Array(buffer);
- var headerLength = 24;
- var index = slice16(view16, headerLength / 2, view32[4] / 2);
- var data = view32[5] === 2
- ? slice16(view16, (headerLength + view32[4]) / 2)
- : slice32(view32, Math.ceil((headerLength + view32[4]) / 4));
- return new Trie(view32[0], view32[1], view32[2], view32[3], index, data);
- };
- var Trie = /** @class */ (function () {
- function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) {
- this.initialValue = initialValue;
- this.errorValue = errorValue;
- this.highStart = highStart;
- this.highValueIndex = highValueIndex;
- this.index = index;
- this.data = data;
- }
- /**
- * Get the value for a code point as stored in the Trie.
- *
- * @param codePoint the code point
- * @return the value
- */
- Trie.prototype.get = function (codePoint) {
- var ix;
- if (codePoint >= 0) {
- if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) {
- // Ordinary BMP code point, excluding leading surrogates.
- // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index.
- // 16 bit data is stored in the index array itself.
- ix = this.index[codePoint >> UTRIE2_SHIFT_2];
- ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK);
- return this.data[ix];
- }
- if (codePoint <= 0xffff) {
- // Lead Surrogate Code Point. A Separate index section is stored for
- // lead surrogate code units and code points.
- // The main index has the code unit data.
- // For this function, we need the code point data.
- // Note: this expression could be refactored for slightly improved efficiency, but
- // surrogate code points will be so rare in practice that it's not worth it.
- ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2)];
- ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK);
- return this.data[ix];
- }
- if (codePoint < this.highStart) {
- // Supplemental code point, use two-level lookup.
- ix = UTRIE2_INDEX_1_OFFSET - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH + (codePoint >> UTRIE2_SHIFT_1);
- ix = this.index[ix];
- ix += (codePoint >> UTRIE2_SHIFT_2) & UTRIE2_INDEX_2_MASK;
- ix = this.index[ix];
- ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK);
- return this.data[ix];
- }
- if (codePoint <= 0x10ffff) {
- return this.data[this.highValueIndex];
- }
- }
- // Fall through. The code point is outside of the legal range of 0..0x10ffff.
- return this.errorValue;
- };
- return Trie;
- }());
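Editor's note: the constants above encode how a UTRIE2 lookup splits a code point into an index stage and a data stage. A minimal Python sketch of the BMP fast path used by Trie.get (names mirror the constants above; an illustration, not the shipped API):

# Sketch of the UTRIE2 BMP fast path (ordinary code point below U+D800).
UTRIE2_SHIFT_2 = 5                        # 32 data entries per block
UTRIE2_INDEX_SHIFT = 2                    # index entries store (block start >> 2)
UTRIE2_DATA_MASK = (1 << UTRIE2_SHIFT_2) - 1

def bmp_lookup(index, data, code_point):
    block = index[code_point >> UTRIE2_SHIFT_2]                   # pick the 32-entry block
    offset = (block << UTRIE2_INDEX_SHIFT) + (code_point & UTRIE2_DATA_MASK)
    return data[offset]

# e.g. U+0041: index slot 0x41 >> 5 == 2, in-block offset 0x41 & 31 == 1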
-
- /*
- * base64-arraybuffer 1.0.2
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i = 0; i < chars.length; i++) {
- lookup[chars.charCodeAt(i)] = i;
- }
-
- var Prepend = 1;
- var CR = 2;
- var LF = 3;
- var Control = 4;
- var Extend = 5;
- var SpacingMark = 7;
- var L = 8;
- var V = 9;
- var T = 10;
- var LV = 11;
- var LVT = 12;
- var ZWJ = 13;
- var Extended_Pictographic = 14;
- var RI = 15;
- var toCodePoints = function (str) {
- var codePoints = [];
- var i = 0;
- var length = str.length;
- while (i < length) {
- var value = str.charCodeAt(i++);
- if (value >= 0xd800 && value <= 0xdbff && i < length) {
- var extra = str.charCodeAt(i++);
- if ((extra & 0xfc00) === 0xdc00) {
- codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000);
- }
- else {
- codePoints.push(value);
- i--;
- }
- }
- else {
- codePoints.push(value);
- }
- }
- return codePoints;
- };
- var fromCodePoint = function () {
- var codePoints = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- codePoints[_i] = arguments[_i];
- }
- if (String.fromCodePoint) {
- return String.fromCodePoint.apply(String, codePoints);
- }
- var length = codePoints.length;
- if (!length) {
- return '';
- }
- var codeUnits = [];
- var index = -1;
- var result = '';
- while (++index < length) {
- var codePoint = codePoints[index];
- if (codePoint <= 0xffff) {
- codeUnits.push(codePoint);
- }
- else {
- codePoint -= 0x10000;
- codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00);
- }
- if (index + 1 === length || codeUnits.length > 0x4000) {
- result += String.fromCharCode.apply(String, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- };
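Editor's note: toCodePoints and fromCodePoint do the UTF-16 surrogate-pair arithmetic by hand. The same round trip in Python (purely illustrative, since Python strings are already code-point based):

# Surrogate-pair arithmetic mirrored from toCodePoints / fromCodePoint.
def pair_to_code_point(high, low):
    # high surrogate in 0xD800..0xDBFF, low surrogate in 0xDC00..0xDFFF
    return ((high & 0x3FF) << 10) + (low & 0x3FF) + 0x10000

def code_point_to_pair(cp):
    cp -= 0x10000
    return (cp >> 10) + 0xD800, (cp % 0x400) + 0xDC00

assert pair_to_code_point(*code_point_to_pair(0x1F468)) == 0x1F468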
- var UnicodeTrie = createTrieFromBase64(base64);
- var BREAK_NOT_ALLOWED = '×';
- var BREAK_ALLOWED = '÷';
- var codePointToClass = function (codePoint) { return UnicodeTrie.get(codePoint); };
- var _graphemeBreakAtIndex = function (_codePoints, classTypes, index) {
- var prevIndex = index - 2;
- var prev = classTypes[prevIndex];
- var current = classTypes[index - 1];
- var next = classTypes[index];
- // GB3 Do not break between a CR and LF
- if (current === CR && next === LF) {
- return BREAK_NOT_ALLOWED;
- }
- // GB4 Otherwise, break before and after controls.
- if (current === CR || current === LF || current === Control) {
- return BREAK_ALLOWED;
- }
- // GB5
- if (next === CR || next === LF || next === Control) {
- return BREAK_ALLOWED;
- }
- // Do not break Hangul syllable sequences.
- // GB6
- if (current === L && [L, V, LV, LVT].indexOf(next) !== -1) {
- return BREAK_NOT_ALLOWED;
- }
- // GB7
- if ((current === LV || current === V) && (next === V || next === T)) {
- return BREAK_NOT_ALLOWED;
- }
- // GB8
- if ((current === LVT || current === T) && next === T) {
- return BREAK_NOT_ALLOWED;
- }
- // GB9 Do not break before extending characters or ZWJ.
- if (next === ZWJ || next === Extend) {
- return BREAK_NOT_ALLOWED;
- }
- // Do not break before SpacingMarks, or after Prepend characters.
- // GB9a
- if (next === SpacingMark) {
- return BREAK_NOT_ALLOWED;
- }
- // GB9b
- if (current === Prepend) {
- return BREAK_NOT_ALLOWED;
- }
- // GB11 Do not break within emoji modifier sequences or emoji zwj sequences.
- if (current === ZWJ && next === Extended_Pictographic) {
- while (prev === Extend) {
- prev = classTypes[--prevIndex];
- }
- if (prev === Extended_Pictographic) {
- return BREAK_NOT_ALLOWED;
- }
- }
- // GB12 Do not break within emoji flag sequences.
- // That is, do not break between regional indicator (RI) symbols
- // if there is an odd number of RI characters before the break point.
- if (current === RI && next === RI) {
- var countRI = 0;
- while (prev === RI) {
- countRI++;
- prev = classTypes[--prevIndex];
- }
- if (countRI % 2 === 0) {
- return BREAK_NOT_ALLOWED;
- }
- }
- return BREAK_ALLOWED;
- };
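Editor's note: GB12/GB13 are the only rules above that need to look further back than one position: a break between two regional-indicator symbols is forbidden only when an even number of RI symbols precede the left one, so flag emoji pair up two by two. The parity check on its own, as a small Python sketch (class codes follow the constants above):

# Sketch of the GB12/GB13 regional-indicator parity check.
RI = 15

def ri_break_allowed(class_types, i):
    """May we break between class_types[i - 1] and class_types[i], both RI?"""
    count = 0
    j = i - 2
    while j >= 0 and class_types[j] == RI:
        count += 1
        j -= 1
    # an even count means the left RI starts a new flag, so the pair must stay together
    return count % 2 != 0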
- var GraphemeBreaker = function (str) {
- var codePoints = toCodePoints(str);
- var length = codePoints.length;
- var index = 0;
- var lastEnd = 0;
- var classTypes = codePoints.map(codePointToClass);
- return {
- next: function () {
- if (index >= length) {
- return { done: true, value: null };
- }
- var graphemeBreak = BREAK_NOT_ALLOWED;
- while (index < length &&
- (graphemeBreak = _graphemeBreakAtIndex(codePoints, classTypes, ++index)) === BREAK_NOT_ALLOWED) { }
- if (graphemeBreak !== BREAK_NOT_ALLOWED || index === length) {
- var value = fromCodePoint.apply(null, codePoints.slice(lastEnd, index));
- lastEnd = index;
- return { value: value, done: false };
- }
- return { done: true, value: null };
- },
- };
- };
- var splitGraphemes = function (str) {
- var breaker = GraphemeBreaker(str);
- var graphemes = [];
- var bk;
- while (!(bk = breaker.next()).done) {
- if (bk.value) {
- graphemes.push(bk.value.slice());
- }
- }
- return graphemes;
- };
-
- var testRangeBounds = function (document) {
- var TEST_HEIGHT = 123;
- if (document.createRange) {
- var range = document.createRange();
- if (range.getBoundingClientRect) {
- var testElement = document.createElement('boundtest');
- testElement.style.height = TEST_HEIGHT + "px";
- testElement.style.display = 'block';
- document.body.appendChild(testElement);
- range.selectNode(testElement);
- var rangeBounds = range.getBoundingClientRect();
- var rangeHeight = Math.round(rangeBounds.height);
- document.body.removeChild(testElement);
- if (rangeHeight === TEST_HEIGHT) {
- return true;
- }
- }
- }
- return false;
- };
- var testIOSLineBreak = function (document) {
- var testElement = document.createElement('boundtest');
- testElement.style.width = '50px';
- testElement.style.display = 'block';
- testElement.style.fontSize = '12px';
- testElement.style.letterSpacing = '0px';
- testElement.style.wordSpacing = '0px';
- document.body.appendChild(testElement);
- var range = document.createRange();
- testElement.innerHTML = typeof ''.repeat === 'function' ? '👨'.repeat(10) : '';
- var node = testElement.firstChild;
- var textList = toCodePoints$1(node.data).map(function (i) { return fromCodePoint$1(i); });
- var offset = 0;
- var prev = {};
- // ios 13 does not handle range getBoundingClientRect line changes correctly #2177
- var supports = textList.every(function (text, i) {
- range.setStart(node, offset);
- range.setEnd(node, offset + text.length);
- var rect = range.getBoundingClientRect();
- offset += text.length;
- var boundAhead = rect.x > prev.x || rect.y > prev.y;
- prev = rect;
- if (i === 0) {
- return true;
- }
- return boundAhead;
- });
- document.body.removeChild(testElement);
- return supports;
- };
- var testCORS = function () { return typeof new Image().crossOrigin !== 'undefined'; };
- var testResponseType = function () { return typeof new XMLHttpRequest().responseType === 'string'; };
- var testSVG = function (document) {
- var img = new Image();
- var canvas = document.createElement('canvas');
- var ctx = canvas.getContext('2d');
- if (!ctx) {
- return false;
- }
- img.src = "data:image/svg+xml,";
- try {
- ctx.drawImage(img, 0, 0);
- canvas.toDataURL();
- }
- catch (e) {
- return false;
- }
- return true;
- };
- var isGreenPixel = function (data) {
- return data[0] === 0 && data[1] === 255 && data[2] === 0 && data[3] === 255;
- };
- var testForeignObject = function (document) {
- var canvas = document.createElement('canvas');
- var size = 100;
- canvas.width = size;
- canvas.height = size;
- var ctx = canvas.getContext('2d');
- if (!ctx) {
- return Promise.reject(false);
- }
- ctx.fillStyle = 'rgb(0, 255, 0)';
- ctx.fillRect(0, 0, size, size);
- var img = new Image();
- var greenImageSrc = canvas.toDataURL();
- img.src = greenImageSrc;
- var svg = createForeignObjectSVG(size, size, 0, 0, img);
- ctx.fillStyle = 'red';
- ctx.fillRect(0, 0, size, size);
- return loadSerializedSVG$1(svg)
- .then(function (img) {
- ctx.drawImage(img, 0, 0);
- var data = ctx.getImageData(0, 0, size, size).data;
- ctx.fillStyle = 'red';
- ctx.fillRect(0, 0, size, size);
- var node = document.createElement('div');
- node.style.backgroundImage = "url(" + greenImageSrc + ")";
- node.style.height = size + "px";
- // Firefox 55 does not render inline <img> tags
- return isGreenPixel(data)
- ? loadSerializedSVG$1(createForeignObjectSVG(size, size, 0, 0, node))
- : Promise.reject(false);
- })
- .then(function (img) {
- ctx.drawImage(img, 0, 0);
- // Edge does not render background-images
- return isGreenPixel(ctx.getImageData(0, 0, size, size).data);
- })
- .catch(function () { return false; });
- };
- var createForeignObjectSVG = function (width, height, x, y, node) {
- var xmlns = 'http://www.w3.org/2000/svg';
- var svg = document.createElementNS(xmlns, 'svg');
- var foreignObject = document.createElementNS(xmlns, 'foreignObject');
- svg.setAttributeNS(null, 'width', width.toString());
- svg.setAttributeNS(null, 'height', height.toString());
- foreignObject.setAttributeNS(null, 'width', '100%');
- foreignObject.setAttributeNS(null, 'height', '100%');
- foreignObject.setAttributeNS(null, 'x', x.toString());
- foreignObject.setAttributeNS(null, 'y', y.toString());
- foreignObject.setAttributeNS(null, 'externalResourcesRequired', 'true');
- svg.appendChild(foreignObject);
- foreignObject.appendChild(node);
- return svg;
- };
- var loadSerializedSVG$1 = function (svg) {
- return new Promise(function (resolve, reject) {
- var img = new Image();
- img.onload = function () { return resolve(img); };
- img.onerror = reject;
- img.src = "data:image/svg+xml;charset=utf-8," + encodeURIComponent(new XMLSerializer().serializeToString(svg));
- });
- };
- var FEATURES = {
- get SUPPORT_RANGE_BOUNDS() {
- var value = testRangeBounds(document);
- Object.defineProperty(FEATURES, 'SUPPORT_RANGE_BOUNDS', { value: value });
- return value;
- },
- get SUPPORT_WORD_BREAKING() {
- var value = FEATURES.SUPPORT_RANGE_BOUNDS && testIOSLineBreak(document);
- Object.defineProperty(FEATURES, 'SUPPORT_WORD_BREAKING', { value: value });
- return value;
- },
- get SUPPORT_SVG_DRAWING() {
- var value = testSVG(document);
- Object.defineProperty(FEATURES, 'SUPPORT_SVG_DRAWING', { value: value });
- return value;
- },
- get SUPPORT_FOREIGNOBJECT_DRAWING() {
- var value = typeof Array.from === 'function' && typeof window.fetch === 'function'
- ? testForeignObject(document)
- : Promise.resolve(false);
- Object.defineProperty(FEATURES, 'SUPPORT_FOREIGNOBJECT_DRAWING', { value: value });
- return value;
- },
- get SUPPORT_CORS_IMAGES() {
- var value = testCORS();
- Object.defineProperty(FEATURES, 'SUPPORT_CORS_IMAGES', { value: value });
- return value;
- },
- get SUPPORT_RESPONSE_TYPE() {
- var value = testResponseType();
- Object.defineProperty(FEATURES, 'SUPPORT_RESPONSE_TYPE', { value: value });
- return value;
- },
- get SUPPORT_CORS_XHR() {
- var value = 'withCredentials' in new XMLHttpRequest();
- Object.defineProperty(FEATURES, 'SUPPORT_CORS_XHR', { value: value });
- return value;
- },
- get SUPPORT_NATIVE_TEXT_SEGMENTATION() {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- var value = !!(typeof Intl !== 'undefined' && Intl.Segmenter);
- Object.defineProperty(FEATURES, 'SUPPORT_NATIVE_TEXT_SEGMENTATION', { value: value });
- return value;
- }
- };
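Editor's note: each FEATURES getter runs its detection once and then redefines the property with the computed value, so later reads cost nothing. The closest Python idiom is functools.cached_property; a short sketch with a hypothetical probe:

# Run-once, memoised feature detection via functools.cached_property.
from functools import cached_property

def probe_cors_images() -> bool:
    return True  # stand-in for the real environment check

class Features:
    @cached_property
    def support_cors_images(self) -> bool:
        # the probe runs on first access only; the result is cached on the instance
        return probe_cors_images()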
-
- var TextBounds = /** @class */ (function () {
- function TextBounds(text, bounds) {
- this.text = text;
- this.bounds = bounds;
- }
- return TextBounds;
- }());
- var parseTextBounds = function (context, value, styles, node) {
- var textList = breakText(value, styles);
- var textBounds = [];
- var offset = 0;
- textList.forEach(function (text) {
- if (styles.textDecorationLine.length || text.trim().length > 0) {
- if (FEATURES.SUPPORT_RANGE_BOUNDS) {
- var clientRects = createRange(node, offset, text.length).getClientRects();
- if (clientRects.length > 1) {
- var subSegments = segmentGraphemes(text);
- var subOffset_1 = 0;
- subSegments.forEach(function (subSegment) {
- textBounds.push(new TextBounds(subSegment, Bounds.fromDOMRectList(context, createRange(node, subOffset_1 + offset, subSegment.length).getClientRects())));
- subOffset_1 += subSegment.length;
- });
- }
- else {
- textBounds.push(new TextBounds(text, Bounds.fromDOMRectList(context, clientRects)));
- }
- }
- else {
- var replacementNode = node.splitText(text.length);
- textBounds.push(new TextBounds(text, getWrapperBounds(context, node)));
- node = replacementNode;
- }
- }
- else if (!FEATURES.SUPPORT_RANGE_BOUNDS) {
- node = node.splitText(text.length);
- }
- offset += text.length;
- });
- return textBounds;
- };
- var getWrapperBounds = function (context, node) {
- var ownerDocument = node.ownerDocument;
- if (ownerDocument) {
- var wrapper = ownerDocument.createElement('html2canvaswrapper');
- wrapper.appendChild(node.cloneNode(true));
- var parentNode = node.parentNode;
- if (parentNode) {
- parentNode.replaceChild(wrapper, node);
- var bounds = parseBounds(context, wrapper);
- if (wrapper.firstChild) {
- parentNode.replaceChild(wrapper.firstChild, wrapper);
- }
- return bounds;
- }
- }
- return Bounds.EMPTY;
- };
- var createRange = function (node, offset, length) {
- var ownerDocument = node.ownerDocument;
- if (!ownerDocument) {
- throw new Error('Node has no owner document');
- }
- var range = ownerDocument.createRange();
- range.setStart(node, offset);
- range.setEnd(node, offset + length);
- return range;
- };
- var segmentGraphemes = function (value) {
- if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- var segmenter = new Intl.Segmenter(void 0, { granularity: 'grapheme' });
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- return Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; });
- }
- return splitGraphemes(value);
- };
- var segmentWords = function (value, styles) {
- if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- var segmenter = new Intl.Segmenter(void 0, {
- granularity: 'word'
- });
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- return Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; });
- }
- return breakWords(value, styles);
- };
- var breakText = function (value, styles) {
- return styles.letterSpacing !== 0 ? segmentGraphemes(value) : segmentWords(value, styles);
- };
- // https://drafts.csswg.org/css-text/#word-separator
- var wordSeparators = [0x0020, 0x00a0, 0x1361, 0x10100, 0x10101, 0x1039, 0x1091];
- var breakWords = function (str, styles) {
- var breaker = LineBreaker(str, {
- lineBreak: styles.lineBreak,
- wordBreak: styles.overflowWrap === "break-word" /* BREAK_WORD */ ? 'break-word' : styles.wordBreak
- });
- var words = [];
- var bk;
- var _loop_1 = function () {
- if (bk.value) {
- var value = bk.value.slice();
- var codePoints = toCodePoints$1(value);
- var word_1 = '';
- codePoints.forEach(function (codePoint) {
- if (wordSeparators.indexOf(codePoint) === -1) {
- word_1 += fromCodePoint$1(codePoint);
- }
- else {
- if (word_1.length) {
- words.push(word_1);
- }
- words.push(fromCodePoint$1(codePoint));
- word_1 = '';
- }
- });
- if (word_1.length) {
- words.push(word_1);
- }
- }
- };
- while (!(bk = breaker.next()).done) {
- _loop_1();
- }
- return words;
- };
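Editor's note: breakWords walks line-break opportunities and then splits each chunk on the CSS word-separator code points, keeping each separator as a token of its own. The separator pass in Python (same code-point list as above; the line breaker itself is omitted):

# Sketch of the word-separator pass inside breakWords.
WORD_SEPARATORS = {0x0020, 0x00A0, 0x1361, 0x10100, 0x10101, 0x1039, 0x1091}

def split_on_separators(chunk):
    words, word = [], ''
    for ch in chunk:
        if ord(ch) in WORD_SEPARATORS:
            if word:
                words.append(word)
            words.append(ch)      # the separator itself becomes a token
            word = ''
        else:
            word += ch
    if word:
        words.append(word)
    return words

# split_on_separators('foo bar') -> ['foo', ' ', 'bar']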
-
- var TextContainer = /** @class */ (function () {
- function TextContainer(context, node, styles) {
- this.text = transform(node.data, styles.textTransform);
- this.textBounds = parseTextBounds(context, this.text, styles, node);
- }
- return TextContainer;
- }());
- var transform = function (text, transform) {
- switch (transform) {
- case 1 /* LOWERCASE */:
- return text.toLowerCase();
- case 3 /* CAPITALIZE */:
- return text.replace(CAPITALIZE, capitalize);
- case 2 /* UPPERCASE */:
- return text.toUpperCase();
- default:
- return text;
- }
- };
- var CAPITALIZE = /(^|\s|:|-|\(|\))([a-z])/g;
- var capitalize = function (m, p1, p2) {
- if (m.length > 0) {
- return p1 + p2.toUpperCase();
- }
- return m;
- };
-
- var ImageElementContainer = /** @class */ (function (_super) {
- __extends(ImageElementContainer, _super);
- function ImageElementContainer(context, img) {
- var _this = _super.call(this, context, img) || this;
- _this.src = img.currentSrc || img.src;
- _this.intrinsicWidth = img.naturalWidth;
- _this.intrinsicHeight = img.naturalHeight;
- _this.context.cache.addImage(_this.src);
- return _this;
- }
- return ImageElementContainer;
- }(ElementContainer));
-
- var CanvasElementContainer = /** @class */ (function (_super) {
- __extends(CanvasElementContainer, _super);
- function CanvasElementContainer(context, canvas) {
- var _this = _super.call(this, context, canvas) || this;
- _this.canvas = canvas;
- _this.intrinsicWidth = canvas.width;
- _this.intrinsicHeight = canvas.height;
- return _this;
- }
- return CanvasElementContainer;
- }(ElementContainer));
-
- var SVGElementContainer = /** @class */ (function (_super) {
- __extends(SVGElementContainer, _super);
- function SVGElementContainer(context, img) {
- var _this = _super.call(this, context, img) || this;
- var s = new XMLSerializer();
- var bounds = parseBounds(context, img);
- img.setAttribute('width', bounds.width + "px");
- img.setAttribute('height', bounds.height + "px");
- _this.svg = "data:image/svg+xml," + encodeURIComponent(s.serializeToString(img));
- _this.intrinsicWidth = img.width.baseVal.value;
- _this.intrinsicHeight = img.height.baseVal.value;
- _this.context.cache.addImage(_this.svg);
- return _this;
- }
- return SVGElementContainer;
- }(ElementContainer));
-
- var LIElementContainer = /** @class */ (function (_super) {
- __extends(LIElementContainer, _super);
- function LIElementContainer(context, element) {
- var _this = _super.call(this, context, element) || this;
- _this.value = element.value;
- return _this;
- }
- return LIElementContainer;
- }(ElementContainer));
-
- var OLElementContainer = /** @class */ (function (_super) {
- __extends(OLElementContainer, _super);
- function OLElementContainer(context, element) {
- var _this = _super.call(this, context, element) || this;
- _this.start = element.start;
- _this.reversed = typeof element.reversed === 'boolean' && element.reversed === true;
- return _this;
- }
- return OLElementContainer;
- }(ElementContainer));
-
- var CHECKBOX_BORDER_RADIUS = [
- {
- type: 15 /* DIMENSION_TOKEN */,
- flags: 0,
- unit: 'px',
- number: 3
- }
- ];
- var RADIO_BORDER_RADIUS = [
- {
- type: 16 /* PERCENTAGE_TOKEN */,
- flags: 0,
- number: 50
- }
- ];
- var reformatInputBounds = function (bounds) {
- if (bounds.width > bounds.height) {
- return new Bounds(bounds.left + (bounds.width - bounds.height) / 2, bounds.top, bounds.height, bounds.height);
- }
- else if (bounds.width < bounds.height) {
- return new Bounds(bounds.left, bounds.top + (bounds.height - bounds.width) / 2, bounds.width, bounds.width);
- }
- return bounds;
- };
- var getInputValue = function (node) {
- var value = node.type === PASSWORD ? new Array(node.value.length + 1).join('\u2022') : node.value;
- return value.length === 0 ? node.placeholder || '' : value;
- };
- var CHECKBOX = 'checkbox';
- var RADIO = 'radio';
- var PASSWORD = 'password';
- var INPUT_COLOR = 0x2a2a2aff;
- var InputElementContainer = /** @class */ (function (_super) {
- __extends(InputElementContainer, _super);
- function InputElementContainer(context, input) {
- var _this = _super.call(this, context, input) || this;
- _this.type = input.type.toLowerCase();
- _this.checked = input.checked;
- _this.value = getInputValue(input);
- if (_this.type === CHECKBOX || _this.type === RADIO) {
- _this.styles.backgroundColor = 0xdededeff;
- _this.styles.borderTopColor =
- _this.styles.borderRightColor =
- _this.styles.borderBottomColor =
- _this.styles.borderLeftColor =
- 0xa5a5a5ff;
- _this.styles.borderTopWidth =
- _this.styles.borderRightWidth =
- _this.styles.borderBottomWidth =
- _this.styles.borderLeftWidth =
- 1;
- _this.styles.borderTopStyle =
- _this.styles.borderRightStyle =
- _this.styles.borderBottomStyle =
- _this.styles.borderLeftStyle =
- 1 /* SOLID */;
- _this.styles.backgroundClip = [0 /* BORDER_BOX */];
- _this.styles.backgroundOrigin = [0 /* BORDER_BOX */];
- _this.bounds = reformatInputBounds(_this.bounds);
- }
- switch (_this.type) {
- case CHECKBOX:
- _this.styles.borderTopRightRadius =
- _this.styles.borderTopLeftRadius =
- _this.styles.borderBottomRightRadius =
- _this.styles.borderBottomLeftRadius =
- CHECKBOX_BORDER_RADIUS;
- break;
- case RADIO:
- _this.styles.borderTopRightRadius =
- _this.styles.borderTopLeftRadius =
- _this.styles.borderBottomRightRadius =
- _this.styles.borderBottomLeftRadius =
- RADIO_BORDER_RADIUS;
- break;
- }
- return _this;
- }
- return InputElementContainer;
- }(ElementContainer));
-
- var SelectElementContainer = /** @class */ (function (_super) {
- __extends(SelectElementContainer, _super);
- function SelectElementContainer(context, element) {
- var _this = _super.call(this, context, element) || this;
- var option = element.options[element.selectedIndex || 0];
- _this.value = option ? option.text || '' : '';
- return _this;
- }
- return SelectElementContainer;
- }(ElementContainer));
-
- var TextareaElementContainer = /** @class */ (function (_super) {
- __extends(TextareaElementContainer, _super);
- function TextareaElementContainer(context, element) {
- var _this = _super.call(this, context, element) || this;
- _this.value = element.value;
- return _this;
- }
- return TextareaElementContainer;
- }(ElementContainer));
-
- var IFrameElementContainer = /** @class */ (function (_super) {
- __extends(IFrameElementContainer, _super);
- function IFrameElementContainer(context, iframe) {
- var _this = _super.call(this, context, iframe) || this;
- _this.src = iframe.src;
- _this.width = parseInt(iframe.width, 10) || 0;
- _this.height = parseInt(iframe.height, 10) || 0;
- _this.backgroundColor = _this.styles.backgroundColor;
- try {
- if (iframe.contentWindow &&
- iframe.contentWindow.document &&
- iframe.contentWindow.document.documentElement) {
- _this.tree = parseTree(context, iframe.contentWindow.document.documentElement);
- // http://www.w3.org/TR/css3-background/#special-backgrounds
- var documentBackgroundColor = iframe.contentWindow.document.documentElement
- ? parseColor(context, getComputedStyle(iframe.contentWindow.document.documentElement).backgroundColor)
- : COLORS.TRANSPARENT;
- var bodyBackgroundColor = iframe.contentWindow.document.body
- ? parseColor(context, getComputedStyle(iframe.contentWindow.document.body).backgroundColor)
- : COLORS.TRANSPARENT;
- _this.backgroundColor = isTransparent(documentBackgroundColor)
- ? isTransparent(bodyBackgroundColor)
- ? _this.styles.backgroundColor
- : bodyBackgroundColor
- : documentBackgroundColor;
- }
- }
- catch (e) { }
- return _this;
- }
- return IFrameElementContainer;
- }(ElementContainer));
-
- var LIST_OWNERS = ['OL', 'UL', 'MENU'];
- var parseNodeTree = function (context, node, parent, root) {
- for (var childNode = node.firstChild, nextNode = void 0; childNode; childNode = nextNode) {
- nextNode = childNode.nextSibling;
- if (isTextNode(childNode) && childNode.data.trim().length > 0) {
- parent.textNodes.push(new TextContainer(context, childNode, parent.styles));
- }
- else if (isElementNode(childNode)) {
- if (isSlotElement(childNode) && childNode.assignedNodes) {
- childNode.assignedNodes().forEach(function (childNode) { return parseNodeTree(context, childNode, parent, root); });
- }
- else {
- var container = createContainer(context, childNode);
- if (container.styles.isVisible()) {
- if (createsRealStackingContext(childNode, container, root)) {
- container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */;
- }
- else if (createsStackingContext(container.styles)) {
- container.flags |= 2 /* CREATES_STACKING_CONTEXT */;
- }
- if (LIST_OWNERS.indexOf(childNode.tagName) !== -1) {
- container.flags |= 8 /* IS_LIST_OWNER */;
- }
- parent.elements.push(container);
- childNode.slot;
- if (childNode.shadowRoot) {
- parseNodeTree(context, childNode.shadowRoot, container, root);
- }
- else if (!isTextareaElement(childNode) &&
- !isSVGElement(childNode) &&
- !isSelectElement(childNode)) {
- parseNodeTree(context, childNode, container, root);
- }
- }
- }
- }
- }
- };
- var createContainer = function (context, element) {
- if (isImageElement(element)) {
- return new ImageElementContainer(context, element);
- }
- if (isCanvasElement(element)) {
- return new CanvasElementContainer(context, element);
- }
- if (isSVGElement(element)) {
- return new SVGElementContainer(context, element);
- }
- if (isLIElement(element)) {
- return new LIElementContainer(context, element);
- }
- if (isOLElement(element)) {
- return new OLElementContainer(context, element);
- }
- if (isInputElement(element)) {
- return new InputElementContainer(context, element);
- }
- if (isSelectElement(element)) {
- return new SelectElementContainer(context, element);
- }
- if (isTextareaElement(element)) {
- return new TextareaElementContainer(context, element);
- }
- if (isIFrameElement(element)) {
- return new IFrameElementContainer(context, element);
- }
- return new ElementContainer(context, element);
- };
- var parseTree = function (context, element) {
- var container = createContainer(context, element);
- container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */;
- parseNodeTree(context, element, container, container);
- return container;
- };
- var createsRealStackingContext = function (node, container, root) {
- return (container.styles.isPositionedWithZIndex() ||
- container.styles.opacity < 1 ||
- container.styles.isTransformed() ||
- (isBodyElement(node) && root.styles.isTransparent()));
- };
- var createsStackingContext = function (styles) { return styles.isPositioned() || styles.isFloating(); };
- var isTextNode = function (node) { return node.nodeType === Node.TEXT_NODE; };
- var isElementNode = function (node) { return node.nodeType === Node.ELEMENT_NODE; };
- var isHTMLElementNode = function (node) {
- return isElementNode(node) && typeof node.style !== 'undefined' && !isSVGElementNode(node);
- };
- var isSVGElementNode = function (element) {
- return typeof element.className === 'object';
- };
- var isLIElement = function (node) { return node.tagName === 'LI'; };
- var isOLElement = function (node) { return node.tagName === 'OL'; };
- var isInputElement = function (node) { return node.tagName === 'INPUT'; };
- var isHTMLElement = function (node) { return node.tagName === 'HTML'; };
- var isSVGElement = function (node) { return node.tagName === 'svg'; };
- var isBodyElement = function (node) { return node.tagName === 'BODY'; };
- var isCanvasElement = function (node) { return node.tagName === 'CANVAS'; };
- var isVideoElement = function (node) { return node.tagName === 'VIDEO'; };
- var isImageElement = function (node) { return node.tagName === 'IMG'; };
- var isIFrameElement = function (node) { return node.tagName === 'IFRAME'; };
- var isStyleElement = function (node) { return node.tagName === 'STYLE'; };
- var isScriptElement = function (node) { return node.tagName === 'SCRIPT'; };
- var isTextareaElement = function (node) { return node.tagName === 'TEXTAREA'; };
- var isSelectElement = function (node) { return node.tagName === 'SELECT'; };
- var isSlotElement = function (node) { return node.tagName === 'SLOT'; };
- // https://html.spec.whatwg.org/multipage/custom-elements.html#valid-custom-element-name
- var isCustomElement = function (node) { return node.tagName.indexOf('-') > 0; };
-
- var CounterState = /** @class */ (function () {
- function CounterState() {
- this.counters = {};
- }
- CounterState.prototype.getCounterValue = function (name) {
- var counter = this.counters[name];
- if (counter && counter.length) {
- return counter[counter.length - 1];
- }
- return 1;
- };
- CounterState.prototype.getCounterValues = function (name) {
- var counter = this.counters[name];
- return counter ? counter : [];
- };
- CounterState.prototype.pop = function (counters) {
- var _this = this;
- counters.forEach(function (counter) { return _this.counters[counter].pop(); });
- };
- CounterState.prototype.parse = function (style) {
- var _this = this;
- var counterIncrement = style.counterIncrement;
- var counterReset = style.counterReset;
- var canReset = true;
- if (counterIncrement !== null) {
- counterIncrement.forEach(function (entry) {
- var counter = _this.counters[entry.counter];
- if (counter && entry.increment !== 0) {
- canReset = false;
- if (!counter.length) {
- counter.push(1);
- }
- counter[Math.max(0, counter.length - 1)] += entry.increment;
- }
- });
- }
- var counterNames = [];
- if (canReset) {
- counterReset.forEach(function (entry) {
- var counter = _this.counters[entry.counter];
- counterNames.push(entry.counter);
- if (!counter) {
- counter = _this.counters[entry.counter] = [];
- }
- counter.push(entry.reset);
- });
- }
- return counterNames;
- };
- return CounterState;
- }());
- var ROMAN_UPPER = {
- integers: [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1],
- values: ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']
- };
- var ARMENIAN = {
- integers: [
- 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90, 80, 70,
- 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
- ],
- values: [
- 'Ք',
- 'Փ',
- 'Ւ',
- 'Ց',
- 'Ր',
- 'Տ',
- 'Վ',
- 'Ս',
- 'Ռ',
- 'Ջ',
- 'Պ',
- 'Չ',
- 'Ո',
- 'Շ',
- 'Ն',
- 'Յ',
- 'Մ',
- 'Ճ',
- 'Ղ',
- 'Ձ',
- 'Հ',
- 'Կ',
- 'Ծ',
- 'Խ',
- 'Լ',
- 'Ի',
- 'Ժ',
- 'Թ',
- 'Ը',
- 'Է',
- 'Զ',
- 'Ե',
- 'Դ',
- 'Գ',
- 'Բ',
- 'Ա'
- ]
- };
- var HEBREW = {
- integers: [
- 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 400, 300, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20,
- 19, 18, 17, 16, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
- ],
- values: [
- 'י׳',
- 'ט׳',
- 'ח׳',
- 'ז׳',
- 'ו׳',
- 'ה׳',
- 'ד׳',
- 'ג׳',
- 'ב׳',
- 'א׳',
- 'ת',
- 'ש',
- 'ר',
- 'ק',
- 'צ',
- 'פ',
- 'ע',
- 'ס',
- 'נ',
- 'מ',
- 'ל',
- 'כ',
- 'יט',
- 'יח',
- 'יז',
- 'טז',
- 'טו',
- 'י',
- 'ט',
- 'ח',
- 'ז',
- 'ו',
- 'ה',
- 'ד',
- 'ג',
- 'ב',
- 'א'
- ]
- };
- var GEORGIAN = {
- integers: [
- 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90,
- 80, 70, 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
- ],
- values: [
- 'ჵ',
- 'ჰ',
- 'ჯ',
- 'ჴ',
- 'ხ',
- 'ჭ',
- 'წ',
- 'ძ',
- 'ც',
- 'ჩ',
- 'შ',
- 'ყ',
- 'ღ',
- 'ქ',
- 'ფ',
- 'ჳ',
- 'ტ',
- 'ს',
- 'რ',
- 'ჟ',
- 'პ',
- 'ო',
- 'ჲ',
- 'ნ',
- 'მ',
- 'ლ',
- 'კ',
- 'ი',
- 'თ',
- 'ჱ',
- 'ზ',
- 'ვ',
- 'ე',
- 'დ',
- 'გ',
- 'ბ',
- 'ა'
- ]
- };
- var createAdditiveCounter = function (value, min, max, symbols, fallback, suffix) {
- if (value < min || value > max) {
- return createCounterText(value, fallback, suffix.length > 0);
- }
- return (symbols.integers.reduce(function (string, integer, index) {
- while (value >= integer) {
- value -= integer;
- string += symbols.values[index];
- }
- return string;
- }, '') + suffix);
- };
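Editor's note: createAdditiveCounter is a greedy additive-numeral conversion: walk the symbol table from largest to smallest and subtract while the value still covers the symbol. The Roman-numeral case in Python, as a quick check of the logic:

# Greedy additive counter, using the ROMAN_UPPER table above.
ROMAN = list(zip(
    [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1],
    ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'],
))

def to_roman(value):
    out = ''
    for integer, symbol in ROMAN:
        while value >= integer:
            value -= integer
            out += symbol
    return out

assert to_roman(1994) == 'MCMXCIV'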
- var createCounterStyleWithSymbolResolver = function (value, codePointRangeLength, isNumeric, resolver) {
- var string = '';
- do {
- if (!isNumeric) {
- value--;
- }
- string = resolver(value) + string;
- value /= codePointRangeLength;
- } while (value * codePointRangeLength >= codePointRangeLength);
- return string;
- };
- var createCounterStyleFromRange = function (value, codePointRangeStart, codePointRangeEnd, isNumeric, suffix) {
- var codePointRangeLength = codePointRangeEnd - codePointRangeStart + 1;
- return ((value < 0 ? '-' : '') +
- (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, isNumeric, function (codePoint) {
- return fromCodePoint$1(Math.floor(codePoint % codePointRangeLength) + codePointRangeStart);
- }) +
- suffix));
- };
- var createCounterStyleFromSymbols = function (value, symbols, suffix) {
- if (suffix === void 0) { suffix = '. '; }
- var codePointRangeLength = symbols.length;
- return (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, false, function (codePoint) { return symbols[Math.floor(codePoint % codePointRangeLength)]; }) + suffix);
- };
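Editor's note: for symbol-based styles (lower-alpha, katakana, the lists above) the resolver loop is a bijective base-N conversion: there is no zero digit, so the value is decremented before each digit is taken. The lower-alpha case expressed directly in Python (equivalent logic, with integer arithmetic instead of the float division used above):

# Bijective base-26 conversion, the logic behind lower-alpha counters.
def to_lower_alpha(value):
    out = ''
    while value > 0:
        value -= 1                          # bijective numerals have no zero digit
        out = chr(ord('a') + value % 26) + out
        value //= 26
    return out

assert to_lower_alpha(1) == 'a'
assert to_lower_alpha(27) == 'aa'
assert to_lower_alpha(28) == 'ab'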
- var CJK_ZEROS = 1 << 0;
- var CJK_TEN_COEFFICIENTS = 1 << 1;
- var CJK_TEN_HIGH_COEFFICIENTS = 1 << 2;
- var CJK_HUNDRED_COEFFICIENTS = 1 << 3;
- var createCJKCounter = function (value, numbers, multipliers, negativeSign, suffix, flags) {
- if (value < -9999 || value > 9999) {
- return createCounterText(value, 4 /* CJK_DECIMAL */, suffix.length > 0);
- }
- var tmp = Math.abs(value);
- var string = suffix;
- if (tmp === 0) {
- return numbers[0] + string;
- }
- for (var digit = 0; tmp > 0 && digit <= 4; digit++) {
- var coefficient = tmp % 10;
- if (coefficient === 0 && contains(flags, CJK_ZEROS) && string !== '') {
- string = numbers[coefficient] + string;
- }
- else if (coefficient > 1 ||
- (coefficient === 1 && digit === 0) ||
- (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_COEFFICIENTS)) ||
- (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_HIGH_COEFFICIENTS) && value > 100) ||
- (coefficient === 1 && digit > 1 && contains(flags, CJK_HUNDRED_COEFFICIENTS))) {
- string = numbers[coefficient] + (digit > 0 ? multipliers[digit - 1] : '') + string;
- }
- else if (coefficient === 1 && digit > 0) {
- string = multipliers[digit - 1] + string;
- }
- tmp = Math.floor(tmp / 10);
- }
- return (value < 0 ? negativeSign : '') + string;
- };
- var CHINESE_INFORMAL_MULTIPLIERS = '十百千萬';
- var CHINESE_FORMAL_MULTIPLIERS = '拾佰仟萬';
- var JAPANESE_NEGATIVE = 'マイナス';
- var KOREAN_NEGATIVE = '마이너스';
- var createCounterText = function (value, type, appendSuffix) {
- var defaultSuffix = appendSuffix ? '. ' : '';
- var cjkSuffix = appendSuffix ? '、' : '';
- var koreanSuffix = appendSuffix ? ', ' : '';
- var spaceSuffix = appendSuffix ? ' ' : '';
- switch (type) {
- case 0 /* DISC */:
- return '•' + spaceSuffix;
- case 1 /* CIRCLE */:
- return '◦' + spaceSuffix;
- case 2 /* SQUARE */:
- return '◾' + spaceSuffix;
- case 5 /* DECIMAL_LEADING_ZERO */:
- var string = createCounterStyleFromRange(value, 48, 57, true, defaultSuffix);
- return string.length < 4 ? "0" + string : string;
- case 4 /* CJK_DECIMAL */:
- return createCounterStyleFromSymbols(value, '〇一二三四五六七八九', cjkSuffix);
- case 6 /* LOWER_ROMAN */:
- return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix).toLowerCase();
- case 7 /* UPPER_ROMAN */:
- return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix);
- case 8 /* LOWER_GREEK */:
- return createCounterStyleFromRange(value, 945, 969, false, defaultSuffix);
- case 9 /* LOWER_ALPHA */:
- return createCounterStyleFromRange(value, 97, 122, false, defaultSuffix);
- case 10 /* UPPER_ALPHA */:
- return createCounterStyleFromRange(value, 65, 90, false, defaultSuffix);
- case 11 /* ARABIC_INDIC */:
- return createCounterStyleFromRange(value, 1632, 1641, true, defaultSuffix);
- case 12 /* ARMENIAN */:
- case 49 /* UPPER_ARMENIAN */:
- return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix);
- case 35 /* LOWER_ARMENIAN */:
- return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix).toLowerCase();
- case 13 /* BENGALI */:
- return createCounterStyleFromRange(value, 2534, 2543, true, defaultSuffix);
- case 14 /* CAMBODIAN */:
- case 30 /* KHMER */:
- return createCounterStyleFromRange(value, 6112, 6121, true, defaultSuffix);
- case 15 /* CJK_EARTHLY_BRANCH */:
- return createCounterStyleFromSymbols(value, '子丑寅卯辰巳午未申酉戌亥', cjkSuffix);
- case 16 /* CJK_HEAVENLY_STEM */:
- return createCounterStyleFromSymbols(value, '甲乙丙丁戊己庚辛壬癸', cjkSuffix);
- case 17 /* CJK_IDEOGRAPHIC */:
- case 48 /* TRAD_CHINESE_INFORMAL */:
- return createCJKCounter(value, '零一二三四五六七八九', CHINESE_INFORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS);
- case 47 /* TRAD_CHINESE_FORMAL */:
- return createCJKCounter(value, '零壹貳參肆伍陸柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS);
- case 42 /* SIMP_CHINESE_INFORMAL */:
- return createCJKCounter(value, '零一二三四五六七八九', CHINESE_INFORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS);
- case 41 /* SIMP_CHINESE_FORMAL */:
- return createCJKCounter(value, '零壹贰叁肆伍陆柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS);
- case 26 /* JAPANESE_INFORMAL */:
- return createCJKCounter(value, '〇一二三四五六七八九', '十百千万', JAPANESE_NEGATIVE, cjkSuffix, 0);
- case 25 /* JAPANESE_FORMAL */:
- return createCJKCounter(value, '零壱弐参四伍六七八九', '拾百千万', JAPANESE_NEGATIVE, cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS);
- case 31 /* KOREAN_HANGUL_FORMAL */:
- return createCJKCounter(value, '영일이삼사오육칠팔구', '십백천만', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS);
- case 33 /* KOREAN_HANJA_INFORMAL */:
- return createCJKCounter(value, '零一二三四五六七八九', '十百千萬', KOREAN_NEGATIVE, koreanSuffix, 0);
- case 32 /* KOREAN_HANJA_FORMAL */:
- return createCJKCounter(value, '零壹貳參四五六七八九', '拾百千', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS);
- case 18 /* DEVANAGARI */:
- return createCounterStyleFromRange(value, 0x966, 0x96f, true, defaultSuffix);
- case 20 /* GEORGIAN */:
- return createAdditiveCounter(value, 1, 19999, GEORGIAN, 3 /* DECIMAL */, defaultSuffix);
- case 21 /* GUJARATI */:
- return createCounterStyleFromRange(value, 0xae6, 0xaef, true, defaultSuffix);
- case 22 /* GURMUKHI */:
- return createCounterStyleFromRange(value, 0xa66, 0xa6f, true, defaultSuffix);
- case 22 /* HEBREW */:
- return createAdditiveCounter(value, 1, 10999, HEBREW, 3 /* DECIMAL */, defaultSuffix);
- case 23 /* HIRAGANA */:
- return createCounterStyleFromSymbols(value, 'あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわゐゑをん');
- case 24 /* HIRAGANA_IROHA */:
- return createCounterStyleFromSymbols(value, 'いろはにほへとちりぬるをわかよたれそつねならむうゐのおくやまけふこえてあさきゆめみしゑひもせす');
- case 27 /* KANNADA */:
- return createCounterStyleFromRange(value, 0xce6, 0xcef, true, defaultSuffix);
- case 28 /* KATAKANA */:
- return createCounterStyleFromSymbols(value, 'アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヰヱヲン', cjkSuffix);
- case 29 /* KATAKANA_IROHA */:
- return createCounterStyleFromSymbols(value, 'イロハニホヘトチリヌルヲワカヨタレソツネナラムウヰノオクヤマケフコエテアサキユメミシヱヒモセス', cjkSuffix);
- case 34 /* LAO */:
- return createCounterStyleFromRange(value, 0xed0, 0xed9, true, defaultSuffix);
- case 37 /* MONGOLIAN */:
- return createCounterStyleFromRange(value, 0x1810, 0x1819, true, defaultSuffix);
- case 38 /* MYANMAR */:
- return createCounterStyleFromRange(value, 0x1040, 0x1049, true, defaultSuffix);
- case 39 /* ORIYA */:
- return createCounterStyleFromRange(value, 0xb66, 0xb6f, true, defaultSuffix);
- case 40 /* PERSIAN */:
- return createCounterStyleFromRange(value, 0x6f0, 0x6f9, true, defaultSuffix);
- case 43 /* TAMIL */:
- return createCounterStyleFromRange(value, 0xbe6, 0xbef, true, defaultSuffix);
- case 44 /* TELUGU */:
- return createCounterStyleFromRange(value, 0xc66, 0xc6f, true, defaultSuffix);
- case 45 /* THAI */:
- return createCounterStyleFromRange(value, 0xe50, 0xe59, true, defaultSuffix);
- case 46 /* TIBETAN */:
- return createCounterStyleFromRange(value, 0xf20, 0xf29, true, defaultSuffix);
- case 3 /* DECIMAL */:
- default:
- return createCounterStyleFromRange(value, 48, 57, true, defaultSuffix);
- }
- };
-
- var IGNORE_ATTRIBUTE = 'data-html2canvas-ignore';
- var DocumentCloner = /** @class */ (function () {
- function DocumentCloner(context, element, options) {
- this.context = context;
- this.options = options;
- this.scrolledElements = [];
- this.referenceElement = element;
- this.counters = new CounterState();
- this.quoteDepth = 0;
- if (!element.ownerDocument) {
- throw new Error('Cloned element does not have an owner document');
- }
- this.documentElement = this.cloneNode(element.ownerDocument.documentElement, false);
- }
- DocumentCloner.prototype.toIFrame = function (ownerDocument, windowSize) {
- var _this = this;
- var iframe = createIFrameContainer(ownerDocument, windowSize);
- if (!iframe.contentWindow) {
- return Promise.reject("Unable to find iframe window");
- }
- var scrollX = ownerDocument.defaultView.pageXOffset;
- var scrollY = ownerDocument.defaultView.pageYOffset;
- var cloneWindow = iframe.contentWindow;
- var documentClone = cloneWindow.document;
- /* Chrome doesn't detect relative background-images assigned in inline
- '''
\ No newline at end of file
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py
deleted file mode 100644
index 7030bd5a1d57647061064aa91c734e2f496e9b83..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from pathlib import Path
-from toolbox import Toolbox
-from utils.argutils import print_args
-from utils.modelutils import check_model_paths
-import argparse
-import os
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(
- description="Runs the toolbox",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter
- )
-
- parser.add_argument("-d", "--datasets_root", type=Path, help= \
- "Path to the directory containing your datasets. See toolbox/__init__.py for a list of "
- "supported datasets.", default=None)
- parser.add_argument("-vc", "--vc_mode", action="store_true",
- help="Voice Conversion Mode(PPG based)")
- parser.add_argument("-e", "--enc_models_dir", type=Path, default="encoder/saved_models",
- help="Directory containing saved encoder models")
- parser.add_argument("-s", "--syn_models_dir", type=Path, default="synthesizer/saved_models",
- help="Directory containing saved synthesizer models")
- parser.add_argument("-v", "--voc_models_dir", type=Path, default="vocoder/saved_models",
- help="Directory containing saved vocoder models")
- parser.add_argument("-ex", "--extractor_models_dir", type=Path, default="ppg_extractor/saved_models",
- help="Directory containing saved extrator models")
- parser.add_argument("-cv", "--convertor_models_dir", type=Path, default="ppg2mel/saved_models",
- help="Directory containing saved convert models")
- parser.add_argument("--cpu", action="store_true", help=\
- "If True, processing is done on CPU, even when a GPU is available.")
- parser.add_argument("--seed", type=int, default=None, help=\
- "Optional random number seed value to make toolbox deterministic.")
- parser.add_argument("--no_mp3_support", action="store_true", help=\
- "If True, no mp3 files are allowed.")
- args = parser.parse_args()
- print_args(args, parser)
-
- if args.cpu:
- # Hide GPUs from PyTorch to force CPU processing
- os.environ["CUDA_VISIBLE_DEVICES"] = ""
- del args.cpu
-
- ## Remind the user to download pretrained models if needed
- check_model_paths(encoder_path=args.enc_models_dir, synthesizer_path=args.syn_models_dir,
- vocoder_path=args.voc_models_dir)
-
- # Launch the toolbox
- Toolbox(**vars(args))
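Editor's note: one detail worth flagging in the --cpu branch above is ordering. CUDA_VISIBLE_DEVICES only takes effect if it is set before the CUDA context is first initialised, which is why the script clears it before the Toolbox (and therefore any model) is constructed. A minimal sketch of the same pattern:

# Hide GPUs before anything initialises CUDA; the variable is read at init time only.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import torch  # safe as long as no CUDA context was created before this point
print(torch.cuda.is_available())  # expected: False with the variable cleared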
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py
deleted file mode 100644
index 3e356472fb5a27f370cb3920976a11d12a76c1b7..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-## Model parameters
-model_hidden_size = 256
-model_embedding_size = 256
-model_num_layers = 3
-
-
-## Training parameters
-learning_rate_init = 1e-4
-speakers_per_batch = 64
-utterances_per_speaker = 10
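Editor's note: with these defaults a training batch covers speakers_per_batch × utterances_per_speaker = 64 × 10 = 640 partial utterances, each mapped to a 256-dimensional speaker embedding. A tiny sketch of that arithmetic (the GE2E-style batch layout is an assumption about how these parameters are consumed):

# Batch-shape arithmetic implied by the parameters above (GE2E-style layout assumed).
speakers_per_batch = 64
utterances_per_speaker = 10
model_embedding_size = 256

utterances_per_batch = speakers_per_batch * utterances_per_speaker      # 640
embeddings_shape = (speakers_per_batch, utterances_per_speaker, model_embedding_size)
print(utterances_per_batch, embeddings_shape)                           # 640 (64, 10, 256)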
diff --git a/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py b/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py
deleted file mode 100644
index 8c26f1b37dae854f51da938da2fa67a8ef48ce5a..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-import torch
-import numpy as np
-from . import util
-from .body import Body
-from .hand import Hand
-from annotator.util import annotator_ckpts_path
-
-
-body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
-hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
-
-
-class OpenposeDetector:
- def __init__(self):
- body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
- hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
-
- if not os.path.exists(hand_modelpath):
- from basicsr.utils.download_util import load_file_from_url
- load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
- load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
-
- self.body_estimation = Body(body_modelpath)
- self.hand_estimation = Hand(hand_modelpath)
-
- def __call__(self, oriImg, hand=False):
- oriImg = oriImg[:, :, ::-1].copy()
- with torch.no_grad():
- candidate, subset = self.body_estimation(oriImg)
- canvas = np.zeros_like(oriImg)
- canvas = util.draw_bodypose(canvas, candidate, subset)
- if hand:
- hands_list = util.handDetect(candidate, subset, oriImg)
- all_hand_peaks = []
- for x, y, w, is_left in hands_list:
- peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :])
- peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
- peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
- all_hand_peaks.append(peaks)
- canvas = util.draw_handpose(canvas, all_hand_peaks)
- return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
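Editor's note: a hedged usage sketch of the detector defined above (the input file name is an assumption; OpenCV is used only for image I/O):

# Illustrative use of OpenposeDetector as defined above.
import cv2

detector = OpenposeDetector()            # downloads the body/hand checkpoints on first run
image = cv2.imread("person.jpg")         # hypothetical H x W x 3 uint8 input
canvas, pose = detector(image, hand=True)
cv2.imwrite("pose.png", canvas)          # rendered skeleton on a black canvas
print(len(pose["candidate"]), "keypoints detected")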
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py
deleted file mode 100644
index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os
-import pkgutil
-import warnings
-from collections import namedtuple
-
-import torch
-
-if torch.__version__ != 'parrots':
-
- def load_ext(name, funcs):
- ext = importlib.import_module('mmcv.' + name)
- for fun in funcs:
- assert hasattr(ext, fun), f'{fun} is missing in module {name}'
- return ext
-else:
- from parrots import extension
- from parrots.base import ParrotsException
-
- has_return_value_ops = [
- 'nms',
- 'softnms',
- 'nms_match',
- 'nms_rotated',
- 'top_pool_forward',
- 'top_pool_backward',
- 'bottom_pool_forward',
- 'bottom_pool_backward',
- 'left_pool_forward',
- 'left_pool_backward',
- 'right_pool_forward',
- 'right_pool_backward',
- 'fused_bias_leakyrelu',
- 'upfirdn2d',
- 'ms_deform_attn_forward',
- 'pixel_group',
- 'contour_expand',
- ]
-
- def get_fake_func(name, e):
-
- def fake_func(*args, **kwargs):
- warnings.warn(f'{name} is not supported in parrots now')
- raise e
-
- return fake_func
-
- def load_ext(name, funcs):
- ExtModule = namedtuple('ExtModule', funcs)
- ext_list = []
- lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- for fun in funcs:
- try:
- ext_fun = extension.load(fun, name, lib_dir=lib_root)
- except ParrotsException as e:
- if 'No element registered' not in e.message:
- warnings.warn(e.message)
- ext_fun = get_fake_func(fun, e)
- ext_list.append(ext_fun)
- else:
- if fun in has_return_value_ops:
- ext_list.append(ext_fun.op)
- else:
- ext_list.append(ext_fun.op_)
- return ExtModule(*ext_list)
-
-
-def check_ops_exist():
- ext_loader = pkgutil.find_loader('mmcv._ext')
- return ext_loader is not None
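Editor's note: in either branch the caller gets back an object whose attributes are the requested compiled ops, so call sites look the same regardless of backend. A typical (illustrative) call site:

# Typical call site for load_ext; the op names listed here are examples only.
ext_module = load_ext('_ext', ['nms', 'softnms'])
# Under plain PyTorch this is simply the compiled mmcv._ext module; under parrots it is
# a namedtuple wrapping each op (or a stub that re-raises the original load error on call).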
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py
deleted file mode 100644
index f2678b321c295bcceaef945111ac3524be19d6e4..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .misc import add_prefix
-
-__all__ = ['add_prefix']
diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py
deleted file mode 100644
index bc42e00500c7a5b70b2cef83b03e45b5bb471ff8..0000000000000000000000000000000000000000
--- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-
-import cv2
-import numpy as np
-
-from saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch
-from saicinpainting.utils import check_and_warn_input_range
-
-
-class DirectoryVisualizer(BaseVisualizer):
- DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ')
-
- def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10,
- last_without_mask=True, rescale_keys=None):
- self.outdir = outdir
- os.makedirs(self.outdir, exist_ok=True)
- self.key_order = key_order
- self.max_items_in_batch = max_items_in_batch
- self.last_without_mask = last_without_mask
- self.rescale_keys = rescale_keys
-
- def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
- check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image')
- vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch,
- last_without_mask=self.last_without_mask,
- rescale_keys=self.rescale_keys)
-
- vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8')
-
- curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}')
- os.makedirs(curoutdir, exist_ok=True)
- rank_suffix = f'_r{rank}' if rank is not None else ''
- out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg')
-
- vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
- cv2.imwrite(out_fname, vis_img)
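Editor's note: a short usage sketch (tensor shapes and keys are assumptions; images are expected in [0, 1], matching the range check above):

# Illustrative call into DirectoryVisualizer; shapes and keys are assumptions.
import torch

vis = DirectoryVisualizer(outdir='vis_out')
batch = {
    'image':           torch.rand(4, 3, 256, 256),   # already in [0, 1]
    'mask':            torch.randint(0, 2, (4, 1, 256, 256)).float(),
    'predicted_image': torch.rand(4, 3, 256, 256),
    'inpainted':       torch.rand(4, 3, 256, 256),
}
vis(3, 120, batch, suffix='_val')   # writes vis_out/epoch0003_val/batch0000120.jpg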
diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py b/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py
deleted file mode 100644
index 515d272734e4d10d346461965099a86e53f58701..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# almost the same as model.stylegan.non_leaking
-# we only modify the parameters in sample_affine() to make the transformations mild
-
-import math
-
-import torch
-from torch import autograd
-from torch.nn import functional as F
-import numpy as np
-
-from model.stylegan.distributed import reduce_sum
-from model.stylegan.op import upfirdn2d
-
-
-class AdaptiveAugment:
- def __init__(self, ada_aug_target, ada_aug_len, update_every, device):
- self.ada_aug_target = ada_aug_target
- self.ada_aug_len = ada_aug_len
- self.update_every = update_every
-
- self.ada_update = 0
- self.ada_aug_buf = torch.tensor([0.0, 0.0], device=device)
- self.r_t_stat = 0
- self.ada_aug_p = 0
-
- @torch.no_grad()
- def tune(self, real_pred):
- self.ada_aug_buf += torch.tensor(
- (torch.sign(real_pred).sum().item(), real_pred.shape[0]),
- device=real_pred.device,
- )
- self.ada_update += 1
-
- if self.ada_update % self.update_every == 0:
- self.ada_aug_buf = reduce_sum(self.ada_aug_buf)
- pred_signs, n_pred = self.ada_aug_buf.tolist()
-
- self.r_t_stat = pred_signs / n_pred
-
- if self.r_t_stat > self.ada_aug_target:
- sign = 1
-
- else:
- sign = -1
-
- self.ada_aug_p += sign * n_pred / self.ada_aug_len
- self.ada_aug_p = min(1, max(0, self.ada_aug_p))
- self.ada_aug_buf.mul_(0)
- self.ada_update = 0
-
- return self.ada_aug_p
-
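Editor's note: a minimal sketch of how a GAN training loop would typically drive the AdaptiveAugment class above, assuming the module and its stylegan op dependencies are importable. The target value, buffer length and cadence are illustrative, not values prescribed by this file.

import torch

device = 'cpu'
ada = AdaptiveAugment(ada_aug_target=0.6, ada_aug_len=500_000, update_every=8, device=device)
ada_p = 0.0

for step in range(32):                  # stand-in for the real training loop
    real_pred = torch.randn(16)         # discriminator logits on real images
    ada_p = ada.tune(real_pred)         # nudges p toward the sign-rate target

print('current augmentation probability:', ada_p)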
-
-SYM6 = (
- 0.015404109327027373,
- 0.0034907120842174702,
- -0.11799011114819057,
- -0.048311742585633,
- 0.4910559419267466,
- 0.787641141030194,
- 0.3379294217276218,
- -0.07263752278646252,
- -0.021060292512300564,
- 0.04472490177066578,
- 0.0017677118642428036,
- -0.007800708325034148,
-)
-
-
-def translate_mat(t_x, t_y, device="cpu"):
- batch = t_x.shape[0]
-
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
- translate = torch.stack((t_x, t_y), 1)
- mat[:, :2, 2] = translate
-
- return mat
-
-
-def rotate_mat(theta, device="cpu"):
- batch = theta.shape[0]
-
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
- sin_t = torch.sin(theta)
- cos_t = torch.cos(theta)
- rot = torch.stack((cos_t, -sin_t, sin_t, cos_t), 1).view(batch, 2, 2)
- mat[:, :2, :2] = rot
-
- return mat
-
-
-def scale_mat(s_x, s_y, device="cpu"):
- batch = s_x.shape[0]
-
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
- mat[:, 0, 0] = s_x
- mat[:, 1, 1] = s_y
-
- return mat
-
-
-def translate3d_mat(t_x, t_y, t_z):
- batch = t_x.shape[0]
-
- mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
- translate = torch.stack((t_x, t_y, t_z), 1)
- mat[:, :3, 3] = translate
-
- return mat
-
-
-def rotate3d_mat(axis, theta):
- batch = theta.shape[0]
-
- u_x, u_y, u_z = axis
-
- eye = torch.eye(3).unsqueeze(0)
- cross = torch.tensor([(0, -u_z, u_y), (u_z, 0, -u_x), (-u_y, u_x, 0)]).unsqueeze(0)
- outer = torch.tensor(axis)
- outer = (outer.unsqueeze(1) * outer).unsqueeze(0)
-
- sin_t = torch.sin(theta).view(-1, 1, 1)
- cos_t = torch.cos(theta).view(-1, 1, 1)
-
- rot = cos_t * eye + sin_t * cross + (1 - cos_t) * outer
-
- eye_4 = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
- eye_4[:, :3, :3] = rot
-
- return eye_4
-
-
-def scale3d_mat(s_x, s_y, s_z):
- batch = s_x.shape[0]
-
- mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
- mat[:, 0, 0] = s_x
- mat[:, 1, 1] = s_y
- mat[:, 2, 2] = s_z
-
- return mat
-
-
-def luma_flip_mat(axis, i):
- batch = i.shape[0]
-
- eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
- axis = torch.tensor(axis + (0,))
- flip = 2 * torch.ger(axis, axis) * i.view(-1, 1, 1)
-
- return eye - flip
-
-
-def saturation_mat(axis, i):
- batch = i.shape[0]
-
- eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
- axis = torch.tensor(axis + (0,))
- axis = torch.ger(axis, axis)
- saturate = axis + (eye - axis) * i.view(-1, 1, 1)
-
- return saturate
-
-
-def lognormal_sample(size, mean=0, std=1, device="cpu"):
- return torch.empty(size, device=device).log_normal_(mean=mean, std=std)
-
-
-def category_sample(size, categories, device="cpu"):
- category = torch.tensor(categories, device=device)
- sample = torch.randint(high=len(categories), size=(size,), device=device)
-
- return category[sample]
-
-
-def uniform_sample(size, low, high, device="cpu"):
- return torch.empty(size, device=device).uniform_(low, high)
-
-
-def normal_sample(size, mean=0, std=1, device="cpu"):
- return torch.empty(size, device=device).normal_(mean, std)
-
-
-def bernoulli_sample(size, p, device="cpu"):
- return torch.empty(size, device=device).bernoulli_(p)
-
-
-def random_mat_apply(p, transform, prev, eye, device="cpu"):
- size = transform.shape[0]
- select = bernoulli_sample(size, p, device=device).view(size, 1, 1)
- select_transform = select * transform + (1 - select) * eye
-
- return select_transform @ prev
-
-
-def sample_affine(p, size, height, width, device="cpu"):
- G = torch.eye(3, device=device).unsqueeze(0).repeat(size, 1, 1)
- eye = G
-
- # flip
- param = category_sample(size, (0, 1))
- Gc = scale_mat(1 - 2.0 * param, torch.ones(size), device=device)
- G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('flip', G, scale_mat(1 - 2.0 * param, torch.ones(size)), sep='\n')
-
- # 90 rotate
- #param = category_sample(size, (0, 3))
- #Gc = rotate_mat(-math.pi / 2 * param, device=device)
- #G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('90 rotate', G, rotate_mat(-math.pi / 2 * param), sep='\n')
-
- # integer translate
- param = uniform_sample(size, -0.125, 0.125)
- param_height = torch.round(param * height) / height
- param_width = torch.round(param * width) / width
- Gc = translate_mat(param_width, param_height, device=device)
- G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('integer translate', G, translate_mat(param_width, param_height), sep='\n')
-
- # isotropic scale
- param = lognormal_sample(size, std=0.1 * math.log(2))
- Gc = scale_mat(param, param, device=device)
- G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('isotropic scale', G, scale_mat(param, param), sep='\n')
-
- p_rot = 1 - math.sqrt(1 - p)
-
- # pre-rotate
- param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25)
- Gc = rotate_mat(-param, device=device)
- G = random_mat_apply(p_rot, Gc, G, eye, device=device)
- # print('pre-rotate', G, rotate_mat(-param), sep='\n')
-
- # anisotropic scale
- param = lognormal_sample(size, std=0.1 * math.log(2))
- Gc = scale_mat(param, 1 / param, device=device)
- G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('anisotropic scale', G, scale_mat(param, 1 / param), sep='\n')
-
- # post-rotate
- param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25)
- Gc = rotate_mat(-param, device=device)
- G = random_mat_apply(p_rot, Gc, G, eye, device=device)
- # print('post-rotate', G, rotate_mat(-param), sep='\n')
-
- # fractional translate
- param = normal_sample(size, std=0.125)
- Gc = translate_mat(param, param, device=device)
- G = random_mat_apply(p, Gc, G, eye, device=device)
- # print('fractional translate', G, translate_mat(param, param), sep='\n')
-
- return G
-
-
-def sample_color(p, size):
- C = torch.eye(4).unsqueeze(0).repeat(size, 1, 1)
- eye = C
- axis_val = 1 / math.sqrt(3)
- axis = (axis_val, axis_val, axis_val)
-
- # brightness
- param = normal_sample(size, std=0.2)
- Cc = translate3d_mat(param, param, param)
- C = random_mat_apply(p, Cc, C, eye)
-
- # contrast
- param = lognormal_sample(size, std=0.5 * math.log(2))
- Cc = scale3d_mat(param, param, param)
- C = random_mat_apply(p, Cc, C, eye)
-
- # luma flip
- param = category_sample(size, (0, 1))
- Cc = luma_flip_mat(axis, param)
- C = random_mat_apply(p, Cc, C, eye)
-
- # hue rotation
- param = uniform_sample(size, -math.pi, math.pi)
- Cc = rotate3d_mat(axis, param)
- C = random_mat_apply(p, Cc, C, eye)
-
- # saturation
- param = lognormal_sample(size, std=1 * math.log(2))
- Cc = saturation_mat(axis, param)
- C = random_mat_apply(p, Cc, C, eye)
-
- return C
-
-
-def make_grid(shape, x0, x1, y0, y1, device):
- n, c, h, w = shape
- grid = torch.empty(n, h, w, 3, device=device)
- grid[:, :, :, 0] = torch.linspace(x0, x1, w, device=device)
- grid[:, :, :, 1] = torch.linspace(y0, y1, h, device=device).unsqueeze(-1)
- grid[:, :, :, 2] = 1
-
- return grid
-
-
-def affine_grid(grid, mat):
- n, h, w, _ = grid.shape
- return (grid.view(n, h * w, 3) @ mat.transpose(1, 2)).view(n, h, w, 2)
-
-
-def get_padding(G, height, width, kernel_size):
- device = G.device
-
- cx = (width - 1) / 2
- cy = (height - 1) / 2
- cp = torch.tensor(
- [(-cx, -cy, 1), (cx, -cy, 1), (cx, cy, 1), (-cx, cy, 1)], device=device
- )
- cp = G @ cp.T
-
- pad_k = kernel_size // 4
-
- pad = cp[:, :2, :].permute(1, 0, 2).flatten(1)
- pad = torch.cat((-pad, pad)).max(1).values
- pad = pad + torch.tensor([pad_k * 2 - cx, pad_k * 2 - cy] * 2, device=device)
- pad = pad.max(torch.tensor([0, 0] * 2, device=device))
- pad = pad.min(torch.tensor([width - 1, height - 1] * 2, device=device))
-
- pad_x1, pad_y1, pad_x2, pad_y2 = pad.ceil().to(torch.int32)
-
- return pad_x1, pad_x2, pad_y1, pad_y2
-
-
-def try_sample_affine_and_pad(img, p, kernel_size, G=None):
- batch, _, height, width = img.shape
-
- G_try = G
-
- if G is None:
- G_try = torch.inverse(sample_affine(p, batch, height, width))
-
- pad_x1, pad_x2, pad_y1, pad_y2 = get_padding(G_try, height, width, kernel_size)
-
- img_pad = F.pad(img, (pad_x1, pad_x2, pad_y1, pad_y2), mode="reflect")
-
- return img_pad, G_try, (pad_x1, pad_x2, pad_y1, pad_y2)
-
-
-class GridSampleForward(autograd.Function):
- @staticmethod
- def forward(ctx, input, grid):
- out = F.grid_sample(
- input, grid, mode="bilinear", padding_mode="zeros", align_corners=False
- )
- ctx.save_for_backward(input, grid)
-
- return out
-
- @staticmethod
- def backward(ctx, grad_output):
- input, grid = ctx.saved_tensors
- grad_input, grad_grid = GridSampleBackward.apply(grad_output, input, grid)
-
- return grad_input, grad_grid
-
-
-class GridSampleBackward(autograd.Function):
- @staticmethod
- def forward(ctx, grad_output, input, grid):
- op = torch._C._jit_get_operation("aten::grid_sampler_2d_backward")
- grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
- ctx.save_for_backward(grid)
-
- return grad_input, grad_grid
-
- @staticmethod
- def backward(ctx, grad_grad_input, grad_grad_grid):
- grid, = ctx.saved_tensors
- grad_grad_output = None
-
- if ctx.needs_input_grad[0]:
- grad_grad_output = GridSampleForward.apply(grad_grad_input, grid)
-
- return grad_grad_output, None, None
-
-
-grid_sample = GridSampleForward.apply
-
-
-def scale_mat_single(s_x, s_y):
- return torch.tensor(((s_x, 0, 0), (0, s_y, 0), (0, 0, 1)), dtype=torch.float32)
-
-
-def translate_mat_single(t_x, t_y):
- return torch.tensor(((1, 0, t_x), (0, 1, t_y), (0, 0, 1)), dtype=torch.float32)
-
-
-def random_apply_affine(img, p, G=None, antialiasing_kernel=SYM6):
- kernel = antialiasing_kernel
- len_k = len(kernel)
-
- kernel = torch.as_tensor(kernel).to(img)
- # kernel = torch.ger(kernel, kernel).to(img)
- kernel_flip = torch.flip(kernel, (0,))
-
- img_pad, G, (pad_x1, pad_x2, pad_y1, pad_y2) = try_sample_affine_and_pad(
- img, p, len_k, G
- )
-
- G_inv = (
- translate_mat_single((pad_x1 - pad_x2).item() / 2, (pad_y1 - pad_y2).item() / 2)
- @ G
- )
- up_pad = (
- (len_k + 2 - 1) // 2,
- (len_k - 2) // 2,
- (len_k + 2 - 1) // 2,
- (len_k - 2) // 2,
- )
- img_2x = upfirdn2d(img_pad, kernel.unsqueeze(0), up=(2, 1), pad=(*up_pad[:2], 0, 0))
- img_2x = upfirdn2d(img_2x, kernel.unsqueeze(1), up=(1, 2), pad=(0, 0, *up_pad[2:]))
- G_inv = scale_mat_single(2, 2) @ G_inv @ scale_mat_single(1 / 2, 1 / 2)
- G_inv = translate_mat_single(-0.5, -0.5) @ G_inv @ translate_mat_single(0.5, 0.5)
- batch_size, channel, height, width = img.shape
- pad_k = len_k // 4
- shape = (batch_size, channel, (height + pad_k * 2) * 2, (width + pad_k * 2) * 2)
- G_inv = (
- scale_mat_single(2 / img_2x.shape[3], 2 / img_2x.shape[2])
- @ G_inv
- @ scale_mat_single(1 / (2 / shape[3]), 1 / (2 / shape[2]))
- )
- grid = F.affine_grid(G_inv[:, :2, :].to(img_2x), shape, align_corners=False)
- img_affine = grid_sample(img_2x, grid)
- d_p = -pad_k * 2
- down_pad = (
- d_p + (len_k - 2 + 1) // 2,
- d_p + (len_k - 2) // 2,
- d_p + (len_k - 2 + 1) // 2,
- d_p + (len_k - 2) // 2,
- )
- img_down = upfirdn2d(
- img_affine, kernel_flip.unsqueeze(0), down=(2, 1), pad=(*down_pad[:2], 0, 0)
- )
- img_down = upfirdn2d(
- img_down, kernel_flip.unsqueeze(1), down=(1, 2), pad=(0, 0, *down_pad[2:])
- )
-
- return img_down, G
-
-
-def apply_color(img, mat):
- batch = img.shape[0]
- img = img.permute(0, 2, 3, 1)
- mat_mul = mat[:, :3, :3].transpose(1, 2).view(batch, 1, 3, 3)
- mat_add = mat[:, :3, 3].view(batch, 1, 1, 3)
- img = img @ mat_mul + mat_add
- img = img.permute(0, 3, 1, 2)
-
- return img
-
-
-def random_apply_color(img, p, C=None):
- if C is None:
- C = sample_color(p, img.shape[0])
-
- img = apply_color(img, C.to(img))
-
- return img, C
-
-
-def augment(img, p, transform_matrix=(None, None)):
- img, G = random_apply_affine(img, p, transform_matrix[0])
- img, C = random_apply_color(img, p, transform_matrix[1])
-
- return img, (G, C)
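Editor's note: the augment() entry point above returns the sampled transforms alongside the augmented batch, so the same (G, C) pair can be replayed on a second tensor. A small sketch, assuming the functions in this file and the upfirdn2d op are importable.

import torch

img = torch.randn(4, 3, 64, 64)       # NCHW, roughly [-1, 1]
fake_img = torch.randn(4, 3, 64, 64)

img_aug, (G, C) = augment(img, p=0.6)                            # sample fresh transforms
fake_aug, _ = augment(fake_img, p=0.6, transform_matrix=(G, C))  # replay the same ones

print(img_aug.shape, fake_aug.shape)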
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py
deleted file mode 100644
index 81f2189dcfcb789861c5054dac0838fca01a28bf..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import annotations
-
-import warnings
-
-from . import __version__
-
-
-def deprecate(
- deprecated: str,
- when: int | None,
- replacement: str | None = None,
- *,
- action: str | None = None,
- plural: bool = False,
-) -> None:
- """
- Deprecations helper.
-
- :param deprecated: Name of thing to be deprecated.
- :param when: Pillow major version to be removed in.
- :param replacement: Name of replacement.
- :param action: Instead of "replacement", give a custom call to action
- e.g. "Upgrade to new thing".
- :param plural: if the deprecated thing is plural, needing "are" instead of "is".
-
- Usually of the form:
-
- "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd).
- Use [replacement] instead."
-
- You can leave out the replacement sentence:
-
- "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd)"
-
- Or with another call to action:
-
- "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd).
- [action]."
- """
-
- is_ = "are" if plural else "is"
-
- if when is None:
- removed = "a future version"
- elif when <= int(__version__.split(".")[0]):
- msg = f"{deprecated} {is_} deprecated and should be removed."
- raise RuntimeError(msg)
- elif when == 10:
- removed = "Pillow 10 (2023-07-01)"
- elif when == 11:
- removed = "Pillow 11 (2024-10-15)"
- else:
- msg = f"Unknown removal version: {when}. Update {__name__}?"
- raise ValueError(msg)
-
- if replacement and action:
- msg = "Use only one of 'replacement' and 'action'"
- raise ValueError(msg)
-
- if replacement:
- action = f". Use {replacement} instead."
- elif action:
- action = f". {action.rstrip('.')}."
- else:
- action = ""
-
- warnings.warn(
- f"{deprecated} {is_} deprecated and will be removed in {removed}{action}",
- DeprecationWarning,
- stacklevel=3,
- )
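Editor's note: a hedged example of how the deprecate() helper above is meant to be called from inside Pillow — the feature names here are made up for illustration.

# Announcing a removal planned for Pillow 11:
deprecate("SomeWidget.legacy_mode", 11, "SomeWidget.mode")
# -> DeprecationWarning: SomeWidget.legacy_mode is deprecated and will be removed
#    in Pillow 11 (2024-10-15). Use SomeWidget.mode instead.

# With a custom call to action instead of a replacement:
deprecate("the legacy constants", 11, action="Use the enum values", plural=True)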
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py
deleted file mode 100644
index ae9864851baee17613175361a9983f6756a2b0d1..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py
+++ /dev/null
@@ -1,153 +0,0 @@
-from __future__ import annotations
-
-import math
-import sys
-import threading
-from contextlib import contextmanager
-from importlib import import_module
-from typing import (
- Any,
- Awaitable,
- Callable,
- Generator,
- TypeVar,
-)
-
-import sniffio
-
-# This must be updated when new backends are introduced
-from ._compat import DeprecatedAwaitableFloat
-
-BACKENDS = "asyncio", "trio"
-
-T_Retval = TypeVar("T_Retval")
-threadlocals = threading.local()
-
-
-def run(
- func: Callable[..., Awaitable[T_Retval]],
- *args: object,
- backend: str = "asyncio",
- backend_options: dict[str, Any] | None = None,
-) -> T_Retval:
- """
- Run the given coroutine function in an asynchronous event loop.
-
- The current thread must not be already running an event loop.
-
- :param func: a coroutine function
- :param args: positional arguments to ``func``
- :param backend: name of the asynchronous event loop implementation – currently either
- ``asyncio`` or ``trio``
- :param backend_options: keyword arguments to call the backend ``run()`` implementation with
- (documented :ref:`here <backend options>`)
- :return: the return value of the coroutine function
- :raises RuntimeError: if an asynchronous event loop is already running in this thread
- :raises LookupError: if the named backend is not found
-
- """
- try:
- asynclib_name = sniffio.current_async_library()
- except sniffio.AsyncLibraryNotFoundError:
- pass
- else:
- raise RuntimeError(f"Already running {asynclib_name} in this thread")
-
- try:
- asynclib = import_module(f"..._backends._{backend}", package=__name__)
- except ImportError as exc:
- raise LookupError(f"No such backend: {backend}") from exc
-
- token = None
- if sniffio.current_async_library_cvar.get(None) is None:
- # Since we're in control of the event loop, we can cache the name of the async library
- token = sniffio.current_async_library_cvar.set(backend)
-
- try:
- backend_options = backend_options or {}
- return asynclib.run(func, *args, **backend_options)
- finally:
- if token:
- sniffio.current_async_library_cvar.reset(token)
-
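Editor's note: a short usage sketch for run() as exposed through the public anyio namespace; the trio variant additionally requires trio to be installed.

import anyio

async def main(name: str) -> str:
    await anyio.sleep(0.1)
    return f'hello {name}'

print(anyio.run(main, 'asyncio'))                # default asyncio backend
print(anyio.run(main, 'trio', backend='trio'))   # same coroutine on trio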
-
-async def sleep(delay: float) -> None:
- """
- Pause the current task for the specified duration.
-
- :param delay: the duration, in seconds
-
- """
- return await get_asynclib().sleep(delay)
-
-
-async def sleep_forever() -> None:
- """
- Pause the current task until it's cancelled.
-
- This is a shortcut for ``sleep(math.inf)``.
-
- .. versionadded:: 3.1
-
- """
- await sleep(math.inf)
-
-
-async def sleep_until(deadline: float) -> None:
- """
- Pause the current task until the given time.
-
- :param deadline: the absolute time to wake up at (according to the internal monotonic clock of
- the event loop)
-
- .. versionadded:: 3.1
-
- """
- now = current_time()
- await sleep(max(deadline - now, 0))
-
-
-def current_time() -> DeprecatedAwaitableFloat:
- """
- Return the current value of the event loop's internal clock.
-
- :return: the clock value (seconds)
-
- """
- return DeprecatedAwaitableFloat(get_asynclib().current_time(), current_time)
-
-
-def get_all_backends() -> tuple[str, ...]:
- """Return a tuple of the names of all built-in backends."""
- return BACKENDS
-
-
-def get_cancelled_exc_class() -> type[BaseException]:
- """Return the current async library's cancellation exception class."""
- return get_asynclib().CancelledError
-
-
-#
-# Private API
-#
-
-
-@contextmanager
-def claim_worker_thread(backend: str) -> Generator[Any, None, None]:
- module = sys.modules["anyio._backends._" + backend]
- threadlocals.current_async_module = module
- try:
- yield
- finally:
- del threadlocals.current_async_module
-
-
-def get_asynclib(asynclib_name: str | None = None) -> Any:
- if asynclib_name is None:
- asynclib_name = sniffio.current_async_library()
-
- modulename = "anyio._backends._" + asynclib_name
- try:
- return sys.modules[modulename]
- except KeyError:
- return import_module(modulename)
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/_src/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/_src/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py
deleted file mode 100644
index 0205359857df34981746b8c35025a7fad2152123..0000000000000000000000000000000000000000
--- a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import argparse
-import os
-import requests
-import re
-
-
-"""
-How to use:
-download all the models:
- python main_download_pretrained_models.py --models "all" --model_dir "model_zoo"
-
-download DnCNN models:
- python main_download_pretrained_models.py --models "DnCNN" --model_dir "model_zoo"
-
-download SRMD models:
- python main_download_pretrained_models.py --models "SRMD" --model_dir "model_zoo"
-
-download BSRGAN models:
- python main_download_pretrained_models.py --models "BSRGAN" --model_dir "model_zoo"
-
-download FFDNet models:
- python main_download_pretrained_models.py --models "FFDNet" --model_dir "model_zoo"
-
-download DPSR models:
- python main_download_pretrained_models.py --models "DPSR" --model_dir "model_zoo"
-
-download SwinIR models:
- python main_download_pretrained_models.py --models "SwinIR" --model_dir "model_zoo"
-
-download VRT models:
- python main_download_pretrained_models.py --models "VRT" --model_dir "model_zoo"
-
-download other models:
- python main_download_pretrained_models.py --models "others" --model_dir "model_zoo"
-
-------------------------------------------------------------------
-
-download 'dncnn_15.pth' and 'dncnn_50.pth'
- python main_download_pretrained_models.py --models "dncnn_15.pth dncnn_50.pth" --model_dir "model_zoo"
-
-------------------------------------------------------------------
-
-download DnCNN models and 'BSRGAN.pth'
- python main_download_pretrained_models.py --models "DnCNN BSRGAN.pth" --model_dir "model_zoo"
-
-"""
-
-
-def download_pretrained_model(model_dir='model_zoo', model_name='dncnn3.pth'):
- if os.path.exists(os.path.join(model_dir, model_name)):
- print(f'already exists, skip downloading [{model_name}]')
- else:
- os.makedirs(model_dir, exist_ok=True)
- if 'SwinIR' in model_name:
- url = 'https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}'.format(model_name)
- elif 'VRT' in model_name:
- url = 'https://github.com/JingyunLiang/VRT/releases/download/v0.0/{}'.format(model_name)
- else:
- url = 'https://github.com/cszn/KAIR/releases/download/v1.0/{}'.format(model_name)
- r = requests.get(url, allow_redirects=True)
- print(f'downloading [{model_dir}/{model_name}] ...')
- open(os.path.join(model_dir, model_name), 'wb').write(r.content)
- print('done!')
-
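Editor's note: besides the CLI described in the module docstring, the helper above can be called directly; this sketch assumes network access to the GitHub release URLs listed in the function.

# downloads model_zoo/BSRGAN.pth if it is not already present
download_pretrained_model(model_dir='model_zoo', model_name='BSRGAN.pth')

# SwinIR weights are usually kept in a subfolder, mirroring the CLI behaviour below
download_pretrained_model(model_dir='model_zoo/swinir',
                          model_name='001_classicalSR_DF2K_s64w8_SwinIR-M_x4.pth')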
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--models',
- type=lambda s: re.split(' |, ', s),
- default = "dncnn3.pth",
- help='comma or space delimited list of characters, e.g., "DnCNN", "DnCNN BSRGAN.pth", "dncnn_15.pth dncnn_50.pth"')
- parser.add_argument('--model_dir', type=str, default='model_zoo', help='path of model_zoo')
- args = parser.parse_args()
-
- print(f'trying to download {args.models}')
-
- method_model_zoo = {'DnCNN': ['dncnn_15.pth', 'dncnn_25.pth', 'dncnn_50.pth', 'dncnn3.pth', 'dncnn_color_blind.pth', 'dncnn_gray_blind.pth'],
- 'SRMD': ['srmdnf_x2.pth', 'srmdnf_x3.pth', 'srmdnf_x4.pth', 'srmd_x2.pth', 'srmd_x3.pth', 'srmd_x4.pth'],
- 'DPSR': ['dpsr_x2.pth', 'dpsr_x3.pth', 'dpsr_x4.pth', 'dpsr_x4_gan.pth'],
- 'FFDNet': ['ffdnet_color.pth', 'ffdnet_gray.pth', 'ffdnet_color_clip.pth', 'ffdnet_gray_clip.pth'],
- 'USRNet': ['usrgan.pth', 'usrgan_tiny.pth', 'usrnet.pth', 'usrnet_tiny.pth'],
- 'DPIR': ['drunet_gray.pth', 'drunet_color.pth', 'drunet_deblocking_color.pth', 'drunet_deblocking_grayscale.pth'],
- 'BSRGAN': ['BSRGAN.pth', 'BSRNet.pth', 'BSRGANx2.pth'],
- 'IRCNN': ['ircnn_color.pth', 'ircnn_gray.pth'],
- 'SwinIR': ['001_classicalSR_DF2K_s64w8_SwinIR-M_x2.pth', '001_classicalSR_DF2K_s64w8_SwinIR-M_x3.pth',
- '001_classicalSR_DF2K_s64w8_SwinIR-M_x4.pth', '001_classicalSR_DF2K_s64w8_SwinIR-M_x8.pth',
- '001_classicalSR_DIV2K_s48w8_SwinIR-M_x2.pth', '001_classicalSR_DIV2K_s48w8_SwinIR-M_x3.pth',
- '001_classicalSR_DIV2K_s48w8_SwinIR-M_x4.pth', '001_classicalSR_DIV2K_s48w8_SwinIR-M_x8.pth',
- '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x2.pth', '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x3.pth',
- '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x4.pth', '003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth',
- '003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_PSNR.pth', '004_grayDN_DFWB_s128w8_SwinIR-M_noise15.pth',
- '004_grayDN_DFWB_s128w8_SwinIR-M_noise25.pth', '004_grayDN_DFWB_s128w8_SwinIR-M_noise50.pth',
- '005_colorDN_DFWB_s128w8_SwinIR-M_noise15.pth', '005_colorDN_DFWB_s128w8_SwinIR-M_noise25.pth',
- '005_colorDN_DFWB_s128w8_SwinIR-M_noise50.pth', '006_CAR_DFWB_s126w7_SwinIR-M_jpeg10.pth',
- '006_CAR_DFWB_s126w7_SwinIR-M_jpeg20.pth', '006_CAR_DFWB_s126w7_SwinIR-M_jpeg30.pth',
- '006_CAR_DFWB_s126w7_SwinIR-M_jpeg40.pth'],
- 'VRT': ['001_VRT_videosr_bi_REDS_6frames.pth', '002_VRT_videosr_bi_REDS_16frames.pth',
- '003_VRT_videosr_bi_Vimeo_7frames.pth', '004_VRT_videosr_bd_Vimeo_7frames.pth',
- '005_VRT_videodeblurring_DVD.pth', '006_VRT_videodeblurring_GoPro.pth',
- '007_VRT_videodeblurring_REDS.pth', '008_VRT_videodenoising_DAVIS.pth'],
- 'others': ['msrresnet_x4_psnr.pth', 'msrresnet_x4_gan.pth', 'imdn_x4.pth', 'RRDB.pth', 'ESRGAN.pth',
- 'FSSR_DPED.pth', 'FSSR_JPEG.pth', 'RealSR_DPED.pth', 'RealSR_JPEG.pth']
- }
-
- method_zoo = list(method_model_zoo.keys())
- model_zoo = []
- for b in list(method_model_zoo.values()):
- model_zoo += b
-
- if 'all' in args.models:
- for method in method_zoo:
- for model_name in method_model_zoo[method]:
- download_pretrained_model(args.model_dir, model_name)
- else:
- for method_model in args.models:
- if method_model in method_zoo: # method, need for loop
- for model_name in method_model_zoo[method_model]:
- if 'SwinIR' in model_name:
- download_pretrained_model(os.path.join(args.model_dir, 'swinir'), model_name)
- elif 'VRT' in model_name:
- download_pretrained_model(os.path.join(args.model_dir, 'vrt'), model_name)
- else:
- download_pretrained_model(args.model_dir, model_name)
- elif method_model in model_zoo: # model, do not need for loop
- if 'SwinIR' in method_model:
- download_pretrained_model(os.path.join(args.model_dir, 'swinir'), method_model)
- elif 'VRT' in method_model:
- download_pretrained_model(os.path.join(args.model_dir, 'vrt'), method_model)
- else:
- download_pretrained_model(args.model_dir, method_model)
- else:
- print(f'Do not find {method_model} from the pre-trained model zoo!')
-
-
-
-
-
-
-
-
-
diff --git a/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py b/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py
deleted file mode 100644
index 0f7a44aa79fae6c8697f0426248c55c3189ec0f3..0000000000000000000000000000000000000000
--- a/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import html
-import json
-
-import requests
-
-# For local streaming, the websockets are hosted without ssl - http://
-HOST = 'localhost:5000'
-URI = f'http://{HOST}/api/v1/chat'
-
-# For reverse-proxied streaming, the remote will likely host with ssl - https://
-# URI = 'https://your-uri-here.trycloudflare.com/api/v1/chat'
-
-
-def run(user_input, history):
- request = {
- 'user_input': user_input,
- 'max_new_tokens': 250,
- 'auto_max_new_tokens': False,
- 'max_tokens_second': 0,
- 'history': history,
- 'mode': 'instruct', # Valid options: 'chat', 'chat-instruct', 'instruct'
- 'character': 'Example',
- 'instruction_template': 'Vicuna-v1.1', # Will get autodetected if unset
- 'your_name': 'You',
- # 'name1': 'name of user', # Optional
- # 'name2': 'name of character', # Optional
- # 'context': 'character context', # Optional
- # 'greeting': 'greeting', # Optional
- # 'name1_instruct': 'You', # Optional
- # 'name2_instruct': 'Assistant', # Optional
- # 'context_instruct': 'context_instruct', # Optional
- # 'turn_template': 'turn_template', # Optional
- 'regenerate': False,
- '_continue': False,
- 'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
-
- # Generation params. If 'preset' is set to different than 'None', the values
- # in presets/preset-name.yaml are used instead of the individual numbers.
- 'preset': 'None',
- 'do_sample': True,
- 'temperature': 0.7,
- 'top_p': 0.1,
- 'typical_p': 1,
- 'epsilon_cutoff': 0, # In units of 1e-4
- 'eta_cutoff': 0, # In units of 1e-4
- 'tfs': 1,
- 'top_a': 0,
- 'repetition_penalty': 1.18,
- 'presence_penalty': 0,
- 'frequency_penalty': 0,
- 'repetition_penalty_range': 0,
- 'top_k': 40,
- 'min_length': 0,
- 'no_repeat_ngram_size': 0,
- 'num_beams': 1,
- 'penalty_alpha': 0,
- 'length_penalty': 1,
- 'early_stopping': False,
- 'mirostat_mode': 0,
- 'mirostat_tau': 5,
- 'mirostat_eta': 0.1,
- 'grammar_string': '',
- 'guidance_scale': 1,
- 'negative_prompt': '',
-
- 'seed': -1,
- 'add_bos_token': True,
- 'truncation_length': 2048,
- 'ban_eos_token': False,
- 'custom_token_bans': '',
- 'skip_special_tokens': True,
- 'stopping_strings': []
- }
-
- response = requests.post(URI, json=request)
-
- if response.status_code == 200:
- result = response.json()['results'][0]['history']
- print(json.dumps(result, indent=4))
- print()
- print(html.unescape(result['visible'][-1][1]))
-
-
-if __name__ == '__main__':
- user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard."
-
- # Basic example
- history = {'internal': [], 'visible': []}
-
- # "Continue" example. Make sure to set '_continue' to True above
- # arr = [user_input, 'Surely, here is']
- # history = {'internal': [arr], 'visible': [arr]}
-
- run(user_input, history)
diff --git a/spaces/leurez/moss/src/api/index.ts b/spaces/leurez/moss/src/api/index.ts
deleted file mode 100644
index 0ca33fd30456f53e2c784a3e09187c61137b65d2..0000000000000000000000000000000000000000
--- a/spaces/leurez/moss/src/api/index.ts
+++ /dev/null
@@ -1,66 +0,0 @@
-import type { AxiosProgressEvent, GenericAbortSignal } from 'axios'
-import { post } from '@/utils/request'
-import { useAuthStore, useSettingStore } from '@/store'
-
-export function fetchChatAPI(
- prompt: string,
- options?: { conversationId?: string; parentMessageId?: string },
- signal?: GenericAbortSignal,
-) {
- return post({
- url: '/chat',
- data: { prompt, options },
- signal,
- })
-}
-
-export function fetchChatConfig() {
- return post({
- url: '/config',
- })
-}
-
-export function fetchChatAPIProcess(
- params: {
- prompt: string
- options?: { conversationId?: string; parentMessageId?: string }
- signal?: GenericAbortSignal
- onDownloadProgress?: (progressEvent: AxiosProgressEvent) => void },
-) {
- const settingStore = useSettingStore()
- const authStore = useAuthStore()
-
- let data: Record<string, any> = {
- prompt: params.prompt,
- options: params.options,
- }
-
- if (authStore.isChatGPTAPI) {
- data = {
- ...data,
- systemMessage: settingStore.systemMessage,
- temperature: settingStore.temperature,
- top_p: settingStore.top_p,
- }
- }
-
- return post({
- url: '/chat-process',
- data,
- signal: params.signal,
- onDownloadProgress: params.onDownloadProgress,
- })
-}
-
-export function fetchSession() {
- return post({
- url: '/session',
- })
-}
-
-export function fetchVerify(token: string) {
- return post({
- url: '/verify',
- data: { token },
- })
-}
diff --git a/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx b/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx
deleted file mode 100644
index e25af722c7a5dc1121a9ab58d6716952f9f76081..0000000000000000000000000000000000000000
--- a/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx
+++ /dev/null
@@ -1,24 +0,0 @@
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface TextareaProps
- extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
-
-const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
- ({ className, ...props }, ref) => {
- return (
-
- )
- }
-)
-Textarea.displayName = 'Textarea'
-
-export { Textarea }
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download Reading Into Writing 2 By Concepcion D Dadufalza Red Cover ((EXCLUSIVE)).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download Reading Into Writing 2 By Concepcion D Dadufalza Red Cover ((EXCLUSIVE)).md
deleted file mode 100644
index 4577a2b5eee26829a123452630b45384e37d3506..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Download Reading Into Writing 2 By Concepcion D Dadufalza Red Cover ((EXCLUSIVE)).md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
Free Download Reading Into Writing 2 By Concepcion D Dadufalza Red Cover
-
If you are looking for a book that can help you improve your academic and professional writing skills, you might want to check out Reading Into Writing 2 by Concepcion D. Dadufalza. This book is a handbook-workshop-reader for critical reading and writing in expository discourse. It covers various topics such as grammar, vocabulary, sentence structure, paragraph development, essay organization, research skills, and citation styles.
-
free download reading into writing 2 by concepcion d dadufalza red cover
In this article, we will give you an overview of the book, its features, benefits, and how you can get a free download of Reading Into Writing 2 by Concepcion D. Dadufalza red cover.
-
What is Reading Into Writing 2 by Concepcion D. Dadufalza?
-
Reading Into Writing 2 by Concepcion D. Dadufalza is a book that aims to help students and professionals develop their critical reading and writing skills in expository discourse. Expository discourse is a type of writing that explains, informs, or describes something. It is commonly used in academic and professional settings, such as essays, reports, articles, manuals, proposals, etc.
-
The book is divided into three parts: a handbook, a workshop, and a reader. The handbook provides the basic rules and guidelines for effective expository writing. The workshop offers exercises and activities to practice and apply the concepts learned from the handbook. The reader contains selected readings from various sources that illustrate different types of expository writing.
-
-
What are the features and benefits of Reading Into Writing 2 by Concepcion D. Dadufalza?
-
Reading Into Writing 2 by Concepcion D. Dadufalza has many features and benefits that make it a valuable resource for anyone who wants to improve their expository writing skills. Some of them are:
-
-
It covers a wide range of topics related to expository writing, such as grammar, vocabulary, sentence structure, paragraph development, essay organization, research skills, and citation styles.
-
It provides clear explanations, examples, and tips for each topic.
-
It includes exercises and activities that allow the readers to practice and apply what they have learned.
-
It offers feedback and suggestions for improvement for each exercise and activity.
-
It contains selected readings from various sources that demonstrate different types of expository writing.
-
It helps the readers develop their critical thinking, analysis, synthesis, and evaluation skills.
-
It enhances the readers' confidence and competence in expository writing.
-
-
How can you get a free download of Reading Into Writing 2 by Concepcion D. Dadufalza red cover?
-
If you are interested in getting a free download of Reading Into Writing 2 by Concepcion D. Dadufalza red cover, you can follow these steps:
-
-
Go to SoundCloud.com and search for "free download reading into writing 2 by concepcion d dadufalza red cover".
-
You will see several results that offer audiobooks and excerpts of the book.
-
Select the one that suits your preference and click on the play button.
-
You can listen to the book online or download it to your device.
-
Enjoy reading into writing 2 by concepcion d dadufalza red cover!
-
-
We hope this article has given you some useful information about Reading Into Writing 2 by Concepcion D. Dadufalza red cover. This book is a great tool for anyone who wants to improve their expository writing skills. You can get a free download of it from SoundCloud.com and start learning today!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (kastor All Video Downloader Key Crack) VERIFIED.md b/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (kastor All Video Downloader Key Crack) VERIFIED.md
deleted file mode 100644
index 9054dc3bc7384e00c1cc762b43a65a0e5ee1c199..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/HD Online Player (kastor All Video Downloader Key Crack) VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
HD Online Player (kastor all video downloader key crack)
-
-kastor v5.9 premium version key. Kastor All Video Downloader with crack and serial Kastor All Video Downloader is ... PowerDVD 9 is simply the best video playback software for enjoying HD and Blu-ray movies. ... With all that you do online, it's a challenge to create and remember ... Adobe Flash Player ActiveX 32.0.0.255. 1fdad05405
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Lazzat Un Nisa Book Pdf [2021].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Lazzat Un Nisa Book Pdf [2021].md
deleted file mode 100644
index 12546b68e9788c2a91d62f22c29e17b4daf891d0..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Lazzat Un Nisa Book Pdf [2021].md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
Lazzat Un Nisa Book Pdf: A Rare and Secret Book of Love and Magic
-
If you are looking for a book that can teach you the secrets of love and magic, then you might be interested in Lazzat Un Nisa Book Pdf. Lazzat Un Nisa, which means "The Pleasure of Women" in Arabic, is a rare and ancient book that contains various recipes, spells, charms, talismans, and rituals for attracting and pleasing women. It is also known as Koka Pandit, which means "The Wise Man of Koka" in Urdu, because it was written by a mysterious author who claimed to be from Koka, a region in India.
-
Lazzat Un Nisa Book Pdf is not an ordinary book. It is a book of secrets that was hidden from the public eye for centuries. It was only available to a few elite and wealthy people who could afford to buy it or borrow it from private libraries. It was also considered dangerous and forbidden by many religious authorities who feared its power and influence. Some even say that it was cursed and that anyone who read it would suffer from misfortune or madness.
However, despite its controversial and mysterious nature, Lazzat Un Nisa Book Pdf is also a book of wisdom and knowledge that can help you improve your life and relationships. It can teach you how to enhance your beauty and charm, how to seduce and satisfy your partner, how to protect yourself from evil and enemies, how to achieve success and wealth, how to heal diseases and ailments, how to communicate with spirits and jinn, and much more.
-
How to Download and Read Lazzat Un Nisa Book Pdf for Free
-
If you want to download and read Lazzat Un Nisa Book Pdf for free, you can follow these simple steps:
-
-
Go to [https://archive.org/details/koka-pundit-lazzatun-nisa-pdf-free-download](https://archive.org/details/koka-pundit-lazzatun-nisa-pdf-free-download) or [https://archive.org/details/lazzatun-nisa-pdf](https://archive.org/details/lazzatun-nisa-pdf) where you can find the scanned copies of the book in Urdu.
-
Click on the PDF icon on the right side of the page to download the file to your computer or mobile device.
-
Open the file using a PDF reader program or app.
-
Enjoy reading Lazzat Un Nisa Book Pdf for free!
-
-
Note: Lazzat Un Nisa Book Pdf is written in Urdu language using Arabic script. If you are not familiar with this language or script, you might need to use a translator tool or app to understand the content.
-
Lazzat Un Nisa Book Pdf: The Ultimate Guide to Islamic Learning for Women
-
Lazzat Un Nisa Book Pdf is not only a book of love and magic, but also a book of Islamic learning for women. It contains many verses from the Quran and Hadiths (sayings of Prophet Muhammad) that explain the rights and duties of women in Islam. It also provides guidance on various aspects of women's life such as marriage, divorce, inheritance, education, hygiene, dress code, etiquette, prayer, fasting, charity, pilgrimage, etc.
-
Lazzat Un Nisa Book Pdf is a valuable resource for Muslim women who want to learn more about their religion and culture. It can help them understand their role and status in society and family. It can also inspire them to practice their faith with devotion and sincerity. It can also empower them to seek knowledge and education from various sources and fields.
-
Lazzat Un Nisa Book Pdf is also a useful reference for non-Muslim women who want to learn more about Islam and its teachings on women. It can help them clear some misconceptions and stereotypes about Islam and Muslim women. It can also help them appreciate the diversity and richness of Islamic culture and history.
-
What is Lazzat Un Nisa Book Pdf and Why You Should Read It
-
Lazzat Un Nisa Book Pdf is a rare and secret book of love and magic that was written by a mysterious author who claimed to be from Koka, a region in India. It contains various recipes, spells, charms, talismans, and rituals for attracting and pleasing women. It also contains verses from the Quran and Hadiths that explain the rights and duties of women in Islam. It also provides guidance on various aspects of women's life such as marriage, divorce, inheritance, education, hygiene, dress code, etiquette, prayer, fasting, charity, pilgrimage, etc.
-
-
You should read Lazzat Un Nisa Book Pdf because it can teach you the secrets of love and magic that can improve your life and relationships. It can also teach you the wisdom and knowledge of Islam that can help you practice your faith with devotion and sincerity. It can also teach you about the culture and history of Islam that can help you appreciate its diversity and richness.
-
Lazzat Un Nisa Book Pdf is a book that will inspire you to learn more about yourself and your partner, your religion and your culture, your past and your future.
-
Lazzat Un Nisa Book Pdf: A Treasure of Mystical Concepts and Figures in Modern Arabic Literature
-
Lazzat Un Nisa Book Pdf is not only a book of love and magic, but also a book of mystical concepts and figures in modern Arabic literature. It contains many references to the Quran and the Hadiths, as well as to other sources of Islamic mysticism such as Sufism, Ismailism, and Shiism. It also draws inspiration from other traditions of mysticism such as Hinduism, Buddhism, Zoroastrianism, and Judaism.
-
Lazzat Un Nisa Book Pdf is a book that explores the themes of love, beauty, desire, ecstasy, transcendence, and union with the divine. It uses various symbols and metaphors to express these themes, such as the moon, the sun, the rose, the nightingale, the wine, the cupbearer, the beloved, the lover, etc. It also uses various forms and styles of poetry and prose to convey these themes, such as ghazals, qasidas, masnavis, rubais, etc.
-
Lazzat Un Nisa Book Pdf is a book that can enrich your appreciation of modern Arabic literature and its connection to the mystical heritage of Islam. It can also inspire you to explore your own spirituality and creativity through the language of love and magic.
-
How to Use Lazzat Un Nisa Book Pdf Safely and Responsibly
-
Lazzat Un Nisa Book Pdf is a book that can offer you many benefits and advantages, but it can also pose some risks and challenges. It is important to use this book safely and responsibly, and to be aware of the possible consequences of your actions. Here are some tips on how to use Lazzat Un Nisa Book Pdf safely and responsibly:
-
-
Do not use this book for evil or harmful purposes. This book contains many recipes, spells, charms, talismans, and rituals that can affect other people's lives and free will. Do not use them to harm, hurt, manipulate, or control anyone. Do not use them to cause mischief, trouble, or chaos. Do not use them to seek revenge, hatred, or enmity. Do not use them to violate the laws of God or the laws of the land.
-
Do not use this book without proper knowledge and guidance. This book contains many secrets and mysteries that require proper knowledge and guidance to understand and apply. Do not use this book without consulting a qualified and trustworthy teacher or mentor who can guide you through the book and its contents. Do not use this book without studying the Quran and the Hadiths, which are the primary sources of Islamic knowledge and guidance.
-
Do not use this book without proper intention and sincerity. This book contains many verses from the Quran and the Hadiths that require proper intention and sincerity to benefit from them. Do not use this book without having a clear and noble intention for your actions. Do not use this book without having a sincere and pure heart for your actions. Do not use this book without seeking the pleasure of God and His approval for your actions.
-
Do not use this book without proper respect and gratitude. This book contains many blessings and gifts from God that require proper respect and gratitude to appreciate them. Do not use this book without respecting its author, its content, its source, and its purpose. Do not use this book without thanking God for His generosity, His mercy, His wisdom, and His guidance.
-
-
Lazzat Un Nisa Book Pdf is a book that can help you achieve your goals and dreams, but it can also test your faith and morals. It is up to you to use this book wisely and ethically, and to avoid any harm or wrongdoing.
-
Where to Find and Buy Lazzat Un Nisa Book Pdf Online or Offline
-
Lazzat Un Nisa Book Pdf is a rare and secret book that is not easily available in the market. It is a book that is sought after by many people who are interested in love and magic, as well as Islamic learning and culture. However, finding and buying this book can be a challenge, especially if you do not know where to look for it. Here are some tips on where to find and buy Lazzat Un Nisa Book Pdf online or offline:
-
-
Online: One of the easiest ways to find and buy Lazzat Un Nisa Book Pdf online is to use a search engine such as Google or Bing and type in the keyword "Lazzat Un Nisa Book Pdf". You will get many results that will direct you to various websites that offer this book for download or purchase. Some of these websites are reliable and trustworthy, while others may be fraudulent or scam. You should be careful and cautious when choosing a website to download or buy this book from. You should check the reviews, ratings, feedback, and testimonials of other customers who have used the website before. You should also check the security, privacy, and refund policies of the website before making any payment or transaction.
-
Offline: Another way to find and buy Lazzat Un Nisa Book Pdf offline is to visit a local bookstore or library that specializes in Islamic books or books on mysticism. You can ask the staff or the owner if they have this book in stock or if they can order it for you. You can also browse through their catalog or shelves and look for this book among other similar books. However, you should be aware that this book may not be available in every bookstore or library, as it is a rare and secret book that is not widely distributed or circulated.
-
-
Lazzat Un Nisa Book Pdf is a book that can offer you many benefits and advantages, but it can also be hard to find and buy. It is a book that requires proper knowledge, guidance, intention, respect, and gratitude to use it safely and responsibly. It is a book that can help you achieve your goals and dreams, but it can also test your faith and morals. It is up to you to use this book wisely and ethically, and to avoid any harm or wrongdoing.
-
Conclusion
-
Lazzat Un Nisa Book Pdf is a book that can enrich your appreciation of modern Arabic literature and its connection to the mystical heritage of Islam. It is a book that can inspire you to explore your own spirituality and creativity through the language of love and magic. It is a book that can help you achieve your goals and dreams, but it can also pose some risks and challenges. It is important to use this book safely and responsibly, and to be aware of the possible consequences of your actions. It is also important to find and buy this book from reliable and trustworthy sources, whether online or offline. Lazzat Un Nisa Book Pdf is a book that can offer you many benefits and advantages, but it can also test your faith and morals. It is up to you to use this book wisely and ethically, and to avoid any harm or wrongdoing.
"Likelihoods" will show more than N tokens if a top completion for one sentence is unlikely for the other sentence.')
- .parent().parent()
- .append('div.flex-row')
- .appendMany('div.button', [30, 200, 1000, 5000, 99999])
- .text(d => d > 5000 ? 'All' : d)
- .st({textAlign: 'center'})
- .on('click', d => {
- pair.count = d
- updateChart()
- })
-
- var typeSel = optionSel.append('div')
- .append('b').text('Chart Type')
- .append('info').text('ⓘ').call(addLockedTooltip)
- .datum('"Likelihoods" shows the logits from both models plotted directly with a shared linear scale. To better contrast the outputs, "Differences" shows logitA - logitB on the y-axis and mean(logitA, logitB) on the x-axis with separate linear scales.')
- .parent().parent()
- .append('div.flex-row')
- .appendMany('div.button', ['Likelihoods', 'Differences'])
- .text(d => d)
- .st({textAlign: 'center'})
- .on('click', d => {
- pair.type = d
- updateChart()
- })
-
- var modelSel = optionSel.append('div')
- .st({display: pair.model == 'BERT' ? 'none' : ''})
- .append('b').text('Model')
- .parent()
- .append('div.flex-row')
- .appendMany('div.button', ['BERT', 'Zari'])
- .text(d => d)
- .st({textAlign: 'center'})
- .on('click', d => {
- pair.model = d
- updateChart()
- })
-
- // TODO add loading spinner
- var updateSel = optionSel
- .append('div.flex-row')
- .append('div.button.update').on('click', updateChart)
- .text('Update')
- .st({display: isMobile ? 'none' : ''})
-
- var warningSel = optionSel.append('div.warning')
- .text('⚠️Some of the text this model was trained on includes harmful stereotypes. This is a tool to uncover these associations—not an endorsement of them.')
-
- var resetSel = optionSel.append('div.reset')
- .html('↻ Reset')
- .on('click', () => {
- pair = JSON.parse(pair.pairStr)
- pair.pairStr = JSON.stringify(pair)
-
- input0Sel.node().value = pair.s0
- input1Sel.node().value = pair.s1
-
- updateChart(true)
- })
-
- if (pair.alts){
- d3.select('.' + pair.class + '-alts').html('')
- .classed('alt-block', 1).st({display: 'block'})
- .appendMany('span.p-button-link', pair.alts)
- .html(d => d.str)
- .on('click', d => {
- input0Sel.node().value = d.s0
- input1Sel.node().value = d.s1
-
- updateChart()
- })
- }
-
-
- var margin = {bottom: 50, left: 25, top: 5, right: 20}
- var graphSel = sel.append('div.graph')
- var totalWidth = graphSel.node().offsetWidth
- var width = totalWidth - margin.left - margin.right
-
- var c = d3.conventions({
- sel: graphSel.append('div').st({marginTop: isMobile ? 20 : -5}),
- width,
- height: width,
- margin,
- layers: 'sdds',
- })
-
-
- var nTicks = 4
- var tickScale = d3.scaleLinear().range([0, c.width])
- c.svg.appendMany('path.bg-tick', d3.range(nTicks + 1))
- .at({d: d => `M ${.5 + Math.round(tickScale(d/nTicks))} 0 V ${c.height}`})
- c.svg.appendMany('path.bg-tick', d3.range(nTicks + 1))
- .at({d: d => `M 0 ${.5 + Math.round(tickScale(d/nTicks))} H ${c.width}`})
-
-
- var annotationSel = c.layers[1].appendMany('div.annotations', pair.annotations)
- .translate(d => d.pos)
- .html(d => d.str)
- .st({color: d => d.color, width: 250, postion: 'absolute'})
-
- var scatter = window.initScatter(c)
-
- updateChart(true)
-
-
- async function updateChart(isFirst){
- sel.classed('changed', 0)
- warningSel.st({opacity: isFirst ? 0 : 1})
- resetSel.st({opacity: isFirst ? 0 : 1})
- annotationSel.st({opacity: isFirst ? 1 : 0})
-
- countSel.classed('active', d => d == pair.count)
- typeSel.classed('active', d => d == pair.type)
- modelSel.classed('active', d => d == pair.model)
-
- function getStr(sel){
- return sel.node().value.replace('_', '[MASK]')
- }
-
- var modelPath = pair.model == 'Zari' ? 'embed_zari_cda' : 'embed'
-
- pair.s0 = input0Sel.node().value.replace('_', '[MASK]')
- pair.s1 = input1Sel.node().value.replace('_', '[MASK]')
-
- updateSel.classed('loading', 1)
- var vals0 = await post(modelPath, {sentence: pair.s0})
- var vals1 = await post(modelPath, {sentence: pair.s1})
- updateSel.classed('loading', 0)
-
-
- var allTokens = vals0.map((v0, i) => {
- return {word: tokenizer.vocab[i], v0, i, v1: vals1[i]}
- })
- allTokens.forEach(d => {
- d.dif = d.v0 - d.v1
- d.meanV = (d.v0 + d.v1) / 2
- d.isVisible = false
- })
-
- _.sortBy(allTokens, d => -d.v1).forEach((d, i) => d.v1i = i)
- _.sortBy(allTokens, d => -d.v0).forEach((d, i) => d.v0i = i)
-
- var topTokens = allTokens.filter(d => d.v0i <= pair.count || d.v1i <= pair.count)
-
-
- var logitExtent = d3.extent(topTokens.map(d => d.v0).concat(topTokens.map(d => d.v1)))
-
- var tokens = allTokens
- .filter(d => logitExtent[0] <= d.v0 && logitExtent[0] <= d.v1)
-
- var mag = logitExtent[1] - logitExtent[0]
- logitExtent = [logitExtent[0] - mag*.002, logitExtent[1] + mag*.002]
-
- if (pair.type == 'Differences') tokens = _.sortBy(allTokens, d => -d.meanV).slice(0, pair.count)
-
- tokens.forEach(d => {
- d.isVisible = true
- })
-
- var maxDif = d3.max(d3.extent(tokens, d => d.dif).map(Math.abs))
- var color = palette(-maxDif*.8, maxDif*.8)
-
- updateSentenceLabels()
-
- if (pair.type == 'Likelihoods'){
- drawXY()
- } else{
- drawRotated()
- }
-
- sel.classed('is-xy', pair.type == 'Likelihoods')
- sel.classed('is-rotate', pair.type != 'Likelihoods')
-
-
- function drawXY(){
- c.x.domain(logitExtent)
- c.y.domain(logitExtent)
-
- d3.drawAxis(c)
-
- var s = {30: 4, 200: 3, 1000: 3}[pair.count] || 2
- var scatterData = allTokens.map(d => {
- var x = c.x(d.v0)
- var y = c.y(d.v1)
- var fill = color(d.dif)
- var dif = d.dif
- var word = d.word
- var show = ''
- var isVisible = d.isVisible
-
- return {x, y, s, dif, fill, word, show, isVisible}
- })
-
- var textCandidates = _.sortBy(scatterData.filter(d => d.isVisible), d => d.dif)
- d3.nestBy(textCandidates.slice(0, 1000), d => Math.round(d.y/10))
- .forEach(d => d[0].show = 'uf')
- d3.nestBy(textCandidates.reverse().slice(0, 1000), d => Math.round(d.y/10))
- .forEach(d => d[0].show = 'lr')
-
- logitExtent.pair = pair
- scatter.draw(c, scatterData, true)
-
- c.svg.selectAppend('text.x-axis-label.xy-only')
- .translate([c.width/2, c.height + 24])
- .text(pair.label0 ? ' __ likelihood, ' + pair.label0 + ' sentence →' : '__ likelihood, sentence two →')
- .st({fill: util.colors[0]})
- .at({textAnchor: 'middle'})
-
-
- c.svg.selectAppend('g.y-axis-label.xy-only')
- .translate([c.width + 20, c.height/2])
- .selectAppend('text')
- .text(pair.label1 ? ' __ likelihood, ' + pair.label1 + ' sentence →' : '__ likelihood, sentence one →')
- .st({fill: util.colors[1]})
- .at({textAnchor: 'middle', transform: 'rotate(-90)'})
- }
-
- function drawRotated(){
- c.x.domain(d3.extent(tokens, d => d.meanV))
- c.y.domain([maxDif, -maxDif])
-
- d3.drawAxis(c)
-
- var scatterData = allTokens.map(d => {
- var x = c.x(d.meanV)
- var y = c.y(d.dif)
- var fill = color(d.dif)
- var word = d.word
- var show = ''
- var isVisible = d.isVisible
-
- return {x, y, s: 2, fill, word, show, isVisible}
- })
-
- scatterData.forEach(d => {
- d.dx = d.x - c.width/2
- d.dy = d.y - c.height/2
- })
-
- var textCandidates = _.sortBy(scatterData, d => -d.dx*d.dx - d.dy*d.dy)
- .filter(d => d.isVisible)
- .slice(0, 5000)
- d3.nestBy(textCandidates, d => Math.round(12*Math.atan2(d.dx, d.dy)))
- .map(d => d[0])
- .forEach(d => d.show = (d.dy < 0 ? 'u' : 'l') + (d.dx < 0 ? 'l' : 'r'))
-
- scatter.draw(c, scatterData, false)
-
- c.svg.selectAppend('text.rotate-only.x-axis-label')
- .translate([c.width/2, c.height + 24])
- .text('__ likelihood, both sentences →')
- .at({textAnchor: 'middle'})
- .st({fill: '#000'})
-
- c.svg.selectAll('g.rotate-only.sent-1,g.rotate-only.sent-1').remove()
- c.svg.selectAppend('g.rotate-only.sent-1')
- .translate([c.width + 20, c.height/2])
- .append('text')
- .text(`Higher likelihood, ${pair.label1 ? pair.label1 + ' sentence ' : 'sentence one'} →`)
- .at({textAnchor: 'start', transform: 'rotate(-90)', x: 20})
- .st({fill: util.colors[1]})
-
- c.svg.selectAppend('g.rotate-only.sent-0')
- .translate([c.width + 20, c.height/2 + 0])
- .append('text')
- .text(`← Higher likelihood, ${pair.label0 ? pair.label0 + ' sentence ' : 'sentence two'}`)
- .at({textAnchor: 'end', transform: 'rotate(-90)', x: -20})
- .st({fill: util.colors[0]})
- }
- }
-
- function updateSentenceLabels(){
- var t0 = tokenizer.tokenize(pair.s0)
- var t1 = tokenizer.tokenize(pair.s1)
-
- var i = 0
- while (t0[i] == t1[i] && i < t0.length) i++
-
- var j = 1
- while (t0[t0.length - j] == t1[t1.length - j] && j < t0.length) j++
-
- pair.label0 = tokens2origStr(t0, pair.s0)
- pair.label1 = tokens2origStr(t1, pair.s1)
-
- function tokens2origStr(t, s){
- var tokenStr = tokenizer.decode(t.slice(i, -j + 1)).trim()
- var lowerStr = s.toLowerCase()
-
- var startI = lowerStr.indexOf(tokenStr)
- return s.slice(startI, startI + tokenStr.length)
- }
-
- if (
- !pair.label0.length ||
- !pair.label1.length ||
- pair.label0.length > 15 ||
- pair.label1.length > 15){
- pair.label0 = ''
- pair.label1 = ''
- }
-
- // console.log(i, j, pair.label0, pair.label1)
- }
-}
-
-if (window.init) init()
diff --git a/spaces/merve/dataset-worldviews/public/data-leak/index.html b/spaces/merve/dataset-worldviews/public/data-leak/index.html
deleted file mode 100644
index 48382c629935410818fbefd120b3f743019c4f40..0000000000000000000000000000000000000000
--- a/spaces/merve/dataset-worldviews/public/data-leak/index.html
+++ /dev/null
@@ -1,170 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Why Some Models Leak Data
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Machine learning models use large amounts of data, some of which can be sensitive. If they're not trained correctly, sometimes that data is inadvertently revealed.
-
-
-
-
Let’s take a look at a game of soccer.
-
-
-
-
-
-
Using the position of each player as training data, we can teach a model to predict which team would get to a loose ball first at each spot on the field, indicated by the color of the pixel.
-
-
-
It updates in real-time—drag the players around to see the model change.
-
-
This model reveals quite a lot about the data used to train it. Even without the actual positions of the players, it is simple to see where players might be.
-
-
-
Click this button to move the players
-
Take a guess at where the yellow team’s goalie is now, then check their actual position. How close were you?
-
Sensitive Salary Data
-
-
In this specific soccer example, being able to make educated guesses about the data a model was trained on doesn’t matter too much. But what if our data points represent something more sensitive?
-
-
-
We’ve fed the same numbers into the model, but now they represent salary data instead of soccer data. Building models like this is a common technique to detect discrimination. A union might test if a company is paying men and women fairly by building a salary model that takes into account years of experience. They can then publish the results to bring pressure for change or show improvement.
-
- In this hypothetical salary study, even though no individual salaries have been published, it is easy to infer the salary of the newest male hire. And carefully cross-referencing public start dates on LinkedIn with the model could almost perfectly reveal everyone’s salary.
-
Because the model here is so flexible (there are hundreds of square patches with independently calculated predictions) and we have so few data points (just 22 people), it is able to “memorize” individual data points. If we’re looking to share information about patterns in salaries, a simpler and more constrained model like a linear regression might be more appropriate.
-
-
-
By boiling down the 22 data points to two lines we’re able to see broad trends without being able to guess anyone’s salary.
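-
- A minimal sketch of that idea in code (the numbers, variable names, and the use of numpy's least-squares fit are illustrative assumptions, not part of the original study):
-
-import numpy as np
-
-rng = np.random.default_rng(0)
-
-# Hypothetical salaries: 11 men and 11 women, years of experience -> salary in $1000s.
-men_years = np.array([1, 3, 4, 6, 8, 9, 11, 12, 14, 15, 17])
-men_salary = 50 + 2 * men_years + rng.normal(0, 3, men_years.size)
-women_years = np.array([2, 3, 5, 6, 7, 9, 10, 12, 13, 16, 18])
-women_salary = 46 + 2 * women_years + rng.normal(0, 3, women_years.size)
-
-# Fitting one least-squares line per group boils the 22 points down to four numbers.
-print('men   (slope, intercept):', np.polyfit(men_years, men_salary, deg=1))
-print('women (slope, intercept):', np.polyfit(women_years, women_salary, deg=1))
-# Publishing only these coefficients shows the broad pay trend without letting a
-# reader look up any individual's exact salary.
-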
-
Subtle Leaks
-
-
Removing complexity isn’t a complete solution though. Depending on how the data is distributed, even a simple line can inadvertently reveal information.
-
-
-
In this company, almost all the men started several years ago, so the slope of the line is especially sensitive to the salary of the new hire.
-
Is their salary higher or lower than average? Based on the line, we can make a pretty good guess.
-
- Notice that changing the salary of someone with a more common tenure barely moves the line. In general, more typical data points are less susceptible to being leaked. This sets up a tricky trade-off: we want models to learn about edge cases while being sure they haven’t memorized individual data points.
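-
- A rough numerical illustration of that sensitivity (entirely made-up numbers): with everyone else clustered at 5 to 10 years of tenure, the fitted line's value at zero tenure tracks the new hire's actual salary, and nudging their salary moves the published line far more than nudging a typical employee's.
-
-import numpy as np
-
-# Salaries in $1000s; the last point is the lone new hire at tenure 0.
-tenure = np.array([5, 6, 7, 8, 9, 10, 0.0])
-salary = np.array([70, 74, 78, 81, 85, 90, 52.0])
-
-slope, intercept = np.polyfit(tenure, salary, deg=1)
-print('line at tenure 0:', round(intercept, 1))   # ~51.7, nearly the new hire's 52
-
-# Bumping a typical employee's salary by 10 barely moves the intercept...
-typical = salary.copy()
-typical[2] += 10
-print(round(np.polyfit(tenure, typical, 1)[1], 1))
-# ...while bumping the new hire's salary by 10 drags the intercept most of the way with it.
-new_hire = salary.copy()
-new_hire[-1] += 10
-print(round(np.polyfit(tenure, new_hire, 1)[1], 1))
-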
-
Real World Data
-
-
- Models of real-world data are often quite complex—this can improve accuracy, but makes them more susceptible to unexpectedly leaking information. Medical models have inadvertently revealed patients’ genetic markers. Language models have memorized credit card numbers. Faces can even be reconstructed from image models:
-
-
-
- Fredrikson et al. were able to extract the image on the left by repeatedly querying a facial recognition API. It isn’t an exact match with the individual’s actual face (on the right), but this attack only required access to the model’s predictions, not its internal state.
-
Protecting Private Data
-
-
- Training models with differential privacy stops the training data from leaking by limiting how much the model can learn from any one data point. Differentially private models are still at the cutting edge of research, but they’re being packaged into machine learning frameworks, making them much easier to use. When it isn’t possible to train differentially private models, there are also tools that can measure how much data the model is memorizing. Also, standard techniques such as aggregation and limiting how much data a single source can contribute are still useful and usually improve the privacy of the model.
-
As we saw in the Collecting Sensitive Information Explorable, adding enough random noise with differential privacy to protect outliers like the new hire can increase the amount of data required to reach a good level of accuracy. Depending on the application, the constraints of differential privacy could even improve the model—for instance, not learning too much from one data point can help prevent overfitting.
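-
- A conceptual sketch of that core mechanism, written as plain numpy rather than any particular privacy library's API: a DP-SGD-style update clips each example's gradient and adds calibrated noise, which caps how much the final model can depend on any single person's data. The data and hyperparameters below are invented for illustration.
-
-import numpy as np
-
-rng = np.random.default_rng(0)
-
-def dp_sgd_step(w, X, y, lr=0.1, clip=1.0, noise_mult=1.0):
-    # One differentially-private-style step for a linear model (illustrative only).
-    grads = []
-    for xi, yi in zip(X, y):
-        g = 2 * (xi @ w - yi) * xi                             # per-example gradient
-        g = g * min(1.0, clip / (np.linalg.norm(g) + 1e-12))   # bound each example's influence
-        grads.append(g)
-    noisy_sum = np.sum(grads, axis=0) + rng.normal(0, noise_mult * clip, size=w.shape)
-    return w - lr * noisy_sum / len(X)
-
-x = rng.standard_normal(22)                 # 22 standardized "employees"
-X = np.c_[np.ones(22), x]
-y = 1.0 + 0.5 * x + 0.1 * rng.standard_normal(22)
-
-w = np.zeros(2)
-for _ in range(500):
-    w = dp_sgd_step(w, X, y)
-print(w)  # ends near [1.0, 0.5]; clipping plus noise limits what it reveals about any one point
-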
-
- Given the increasing utility of machine learning models for many real-world tasks, it’s clear that more and more systems, devices and apps will be powered, to some extent, by machine learning in the future. While standard privacy best practices developed for non-machine learning systems still apply, machine learning brings new challenges, including the model’s ability to memorize some specific training data points and thus be vulnerable to privacy attacks that seek to extract this data from the model. Fortunately, techniques such as differential privacy can help overcome this specific challenge. Just as with other areas of Responsible AI, it’s important to be aware of these new challenges that come along with machine learning and what steps can be taken to mitigate them.
-
Credits
-
-
Adam Pearce and Ellen Jiang // December 2020
-
Thanks to Andreas Terzis, Ben Wedin, Carey Radebaugh, David Weinberger, Emily Reif, Fernanda Viégas, Hal Abelson, Kristen Olson, Martin Wattenberg, Michael Terry, Miguel Guevara, Thomas Steinke, Yannick Assogba, Zan Armstrong and our other colleagues at Google for their help with this piece.
Models trained on real-world data can encode real-world bias. Hiding information about protected classes doesn't always fix things — sometimes it can even hurt.
-
-
-
-
-
-
-
-
-
Modeling College GPA
-
-
Let's pretend we're college admissions officers trying to predict the GPA students will have in college (in these examples we'll use simulated data).
-
-
One simple approach: predict that students will have the same GPA in college as they did in high school.
-
-
-
-
-
- This is at best a very rough approximation, and it misses a key feature of this data set: students usually have better grades in high school than in college.
-
-
We're over-predicting college grades more often than we under-predict.
-
-
-
-
-
Predicting with ML
-
If we switched to using a machine learning model and entered these student grades, it would recognize this pattern and adjust the prediction.
-
-
The model does this without knowing anything about the real-life context of grading in high school versus college.
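-
- A small sketch of that adjustment with simulated grades (our own invented numbers and numpy's least-squares fit, not the article's actual model):
-
-import numpy as np
-
-rng = np.random.default_rng(0)
-hs_gpa = rng.uniform(2.0, 4.0, 200)
-college_gpa = np.clip(hs_gpa - 0.4 + rng.normal(0, 0.3, 200), 0.0, 4.0)  # grades drop a bit
-
-# Naive rule: predict college GPA = high school GPA. It over-predicts on average.
-print('mean over-prediction:', round(np.mean(hs_gpa - college_gpa), 2))
-
-# A fitted model learns the downward shift purely from the numbers.
-slope, intercept = np.polyfit(hs_gpa, college_gpa, deg=1)
-print('fitted: college = %.2f * high_school + %.2f' % (slope, intercept))
-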
-
-
-
-
-
Giving the model more information about students increases accuracy more...
-
-
-
-
-
...and more.
-
-
-
-
-
Models can encode previous bias
-
- All of this sensitive information about students is just a long list of numbers to the model.
-
-
If a sexist college culture has historically led to lower grades for female students, the model will pick up on that correlation and predict lower grades for women.
-
-
Training on historical data bakes in historical biases. Here the sexist culture has improved, but the model learned from the past correlation and still predicts higher grades for men.
-
-
-
-
Hiding protected classes from the model might not stop discrimination
-
-
Even if we don't tell the model students' genders, it might still score female students poorly.
-
-
With detailed enough information about every student, the model can still synthesize a proxy for gender out of other variables.
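-
- A hedged sketch of how such a proxy can arise, using simulated data and a simple least-squares probe (the feature names and correlations are assumptions for illustration):
-
-import numpy as np
-
-rng = np.random.default_rng(0)
-n = 1000
-gender = rng.integers(0, 2, n)  # 0/1; withheld from the final model
-
-# "Innocent" features that happen to correlate with gender in this simulation.
-extracurriculars = rng.normal(2.0 + 1.0 * gender, 1.0, n)
-essay_topic_score = rng.normal(0.5 - 0.8 * gender, 1.0, n)
-X = np.c_[np.ones(n), extracurriculars, essay_topic_score]
-
-# Probe: how well do the remaining features recover the withheld attribute?
-coef, *_ = np.linalg.lstsq(X, gender, rcond=None)
-guess = (X @ coef > 0.5).astype(int)
-print('proxy accuracy:', (guess == gender).mean())  # well above the 0.5 chance level
-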
-
-
-
-
-
Including a protected attribute may even decrease discrimination
-
-
Let's look at a simplified model, one only taking into account the recommendation of an alumni interviewer.
-
-
-
-
-
The interviewer is quite accurate, except that they're biased against students with a low household income.
-
-
In our toy model, students' grades don't depend on their income once they're in college. In other words, we have biased inputs and unbiased outcomes—the opposite of the previous example, where the inputs weren't biased, but the toxic culture biased the outcomes.
-
-
-
-
-
If we also tell the model each student's household income, it will naturally correct for the interviewer's overrating of high-income students just like it corrected for the difference between high school and college GPAs.
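-
- A toy version of that correction (simulated numbers and an ordinary least-squares fit, standing in for the article's model): when the interviewer under-rates low-income students, a model that also sees the income flag learns a positive coefficient that adds the missing points back.
-
-import numpy as np
-
-rng = np.random.default_rng(0)
-n = 500
-low_income = rng.integers(0, 2, n)
-ability = rng.normal(3.0, 0.4, n)
-interview = ability - 0.5 * low_income + rng.normal(0, 0.1, n)  # biased rating
-college_gpa = ability + rng.normal(0, 0.1, n)                   # unbiased outcome
-
-# Interview score alone: the rating's bias carries through to the predictions.
-w1, *_ = np.linalg.lstsq(np.c_[np.ones(n), interview], college_gpa, rcond=None)
-resid = college_gpa - np.c_[np.ones(n), interview] @ w1
-print('model 1 under-predicts low-income students by', round(resid[low_income == 1].mean(), 2))
-
-# Interview score plus the income flag: the model undoes the interviewer's penalty.
-w2, *_ = np.linalg.lstsq(np.c_[np.ones(n), interview, low_income], college_gpa, rcond=None)
-print('income coefficient:', round(w2[2], 2))  # close to +0.5
-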
-
-
By carefully considering and accounting for bias, we've made the model fairer and more accurate. This isn't always easy to do, especially in circumstances like the historically toxic college culture where unbiased data is limited.
-
-
- And there are fundamental fairness trade-offs that have to be made. Check out the Measuring Fairness explorable to see how those trade-offs work.
-
-
-
-
-
Adam Pearce // May 2020
-
-
Thanks to Carey Radebaugh, Dan Nanas, David Weinberger, Emily Denton, Emily Reif, Fernanda Viégas, Hal Abelson, James Wexler, Kristen Olson, Lucas Dixon, Mahima Pushkarna, Martin Wattenberg, Michael Terry, Rebecca Salois, Timnit Gebru, Tulsee Doshi, Yannick Assogba, Yoni Halpern, Zan Armstrong, and my other colleagues at Google for their help with this piece.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/merve/fill-in-the-blank/source/style.css b/spaces/merve/fill-in-the-blank/source/style.css
deleted file mode 100644
index ad619bacc7b5b7f61788de06850a80ccc7561b83..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/source/style.css
+++ /dev/null
@@ -1,434 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-
-
-
-html{
- background-color: #fff;
- font-weight: normal;
-}
-
-
-body{
- max-width: 850px;
- margin: 0px auto;
- font-family: 'Roboto Slab', serif;
- font-family: 'Roboto', Helvetica, sans-serif;
- font-weight: 300;
- line-height: 1.55em;
- font-size: 16px;
- margin-top: 5px;
- margin-bottom: 80px;
- color: #3C4043;
- font-smoothing: antialiased;
-}
-
-@media (max-width: 760px){
- body{
- padding: 5px;
- }
-}
-
-p{
- line-height: 1.55em;
- font-size: 16px;
- /*line-height: 28px;*/
- color: #3C4043;
- letter-spacing: 0.1px;
-}
-
-a{
- color: black;
-}
-
-.header{
- position: relative;
- color: black;
- font-size: 16px;
- height: 24px;
- overflow: visible;
- font-family: 'Google Sans', sans-serif;
- font-weight: 100;
- font-size: 20px;
- margin: 0px auto;
- margin-top: 15px;
- padding-left: 20px;
-}
-.header-left{
- vertical-align: middle;
- font-size: 20px;
- margin: 0px auto;
- width: 300px;
-}
-.header-left img{
- width: 100px;
- opacity: 1;
- top: 0px;
- position: relative;
-}
-.header-left a:first-child{
- float: left;
-}
-.header-left a:last-child{
- position: relative;
- top: 8px;
- margin-left: 20px;
- float: left;
-}
-.header-left a{
- line-height: 20px;
- -webkit-font-smoothing: antialiased;
- letter-spacing: 0.1px;
- font-size: 20px;
- text-transform: uppercase;
- font-family: "Google Sans";
- text-align: right;
- -webkit-tap-highlight-color: rgba(255,255,255,0);
- font-weight: 300;
- text-decoration: none;
- /*margin: 50px 0 0 50px;*/
- display: inline-block;
- color: #00695C !important;
-}
-.header-left a:hover{
- color: #ff4081 !important;
-}
-
-@media (max-width: 750px){
- .header-right span{
- opacity: 0;
- }
-}
-.header a{
- /*opacity: .5;*/
- text-decoration: none;
-}
-.header a:hover{
- opacity: 1
-}
-
-
-p{
- max-width: 750px;
- margin: 0px auto;
- margin-block-start: 1em;
- margin-block-end: 1em;
-}
-
-/*TODO mobile padding?*/
-
-h3{
- max-width: 750px;
- margin: 0px auto;
- font-weight: 100;
- line-height: 1.3em;
-}
-
-h1,h2,h3,h4,h5{
- font-family: 'Google Sans', sans-serif;
- font-weight: 100;
- margin-top: 1.5em;
- margin-bottom: .5em;
-}
-h1{
- font-weight: 100;
- font-size: 34px;
- margin-bottom: .5em;
- line-height: 1.3em;
- margin-top: 1.4em;
- text-align: center;
- font-family: "Google Sans";
- /*color: #00695C;*/
-}
-h2,h3,h4,h5{
- font-size: 22px;
-}
-
-/*wp classes*/
-img.aligncenter {
- display: block;
- margin: auto;
- max-width: 750px;
-}
-
-
-
-html{
- overflow-x: hidden;
-}
-
-.full-width{
- width: 100vw;
- position: relative;
- left: 50%;
- right: 50%;
- margin-left: -50vw;
- margin-right: -50vw;
- overflow: hidden;
-}
-
-.full-width img{
- max-width: 100%;
- display: block;
- margin: 0 auto;
-}
-
-.full-width.px980 img, .full-width.px980 div{
- max-width: 980px;
-}
-.full-width > div, .full-width > div > div{
- margin: 0px auto;
-}
-.full-width.px750 img, .full-width.px750 div{
- max-width: 750px;
-}
-
-draft{
- display: none;
- /*visibility: collapse;*/
-}
-
-
-h1, .post-summary{
- max-width: 750px;
- margin: 0px auto;
-}
-.post-summary{
- font-size: 19px;
- margin-bottom: 65px;
- line-height: 1.5em;
-}
-
-h1{
- margin-bottom: 40px;
- margin-top: 50px;
-}
-
-.post-tags{
- line-height: 1.55em;
- font-style: italic;
-}
-
-.thumbnail-caption{
- font-style: italic;
-}
-
-
-
-
-
-
-/*graph scroll stuff*/
-
-#container{
- position: relative;
- width: 900px;
- margin-left: -25px;
-}
-
-#container h3{
- line-height: 1.3em;
-}
-
-
-
-
-
-
-.tooltip {
- top: -1000px;
- position: fixed;
- padding: 10px;
- background: rgba(255, 255, 255, .90);
- border: 1px solid lightgray;
- pointer-events: none;
- width: 300px;
-}
-.tooltip-hidden{
- opacity: 0;
- transition: all .3s;
- transition-delay: .1s;
-}
-
-@media (max-width: 590px){
- div.tooltip{
- bottom: -1px;
- width: calc(100%);
- left: -1px !important;
- right: -1px !important;
- top: auto !important;
- width: auto !important;
- }
-}
-
-
-
-
-.footend{
- margin-left: -9px;
- width: 10px;
-}
-
-
-.footstart, .footend{
- text-decoration: none;
-}
-
-.footstart:hover, .footend:hover{
- text-decoration: underline;
-}
-
-
-
-
-#recirc{
-}
-
-#recirc .img{
- outline: 1px solid #ccc;
-}
-
-#recirc .post:hover .img{
- outline: 1px solid #333;
-}
-
-#recirc .title{
- /*color: #00695C;*/
- font-size: 18px;
- font-weight: 500;
- margin-bottom: -10px;
- /*height: 10px !important;*/
- /*opacity: 0;*/
-}
-
-#recirc .post:hover .title{
- text-decoration: underline !important;
-}
-
-#recirc .post{
- margin-bottom: 30px;
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-/*Nav Style*/
-#nav-container{
- width: 100vw;
- margin-left: calc(50% - 50vw);
- display: inline-block;
- /*display: none;*/
-}
-#navigation {
- margin: 0 auto;
- max-width: 1260px;
- padding: 8px;
- display: flex;
- flex-flow: row wrap;
- align-items: stretch;
- color: rgba(0,0,0,.87);
- font-size: 14px;
- line-height: 20px;
- -webkit-font-smoothing: antialiased;
- font-family: 'Open Sans', Helvetica, sans-serif;
- font-weight: 300;
- letter-spacing: 0.1px;
-}
-.mdl-grid {
- display: -webkit-flex;
- display: -ms-flexbox;
- display: flex;
- -webkit-flex-flow: row wrap;
- -ms-flex-flow: row wrap;
- flex-flow: row wrap;
- margin: 0 auto;
- -webkit-align-items: stretch;
- -ms-flex-align: stretch;
- align-items: stretch;
-}
-
-.mdl-cell {
- box-sizing: border-box;
-}
-
-.nav-links {
- font-size: 20px;
- text-transform: uppercase;
- font-family: "Google Sans";
- color: #4a4a4a;
- text-align: right;
-}
-
-.nav-logo-small {
- width: 110px;
- margin: 42px 0 0 0;
-}
-.nav-links .selected {
- color: #00695C !important;
-}
-/*.nav-links a:visited {
- color: #4a4a4a;
-}
-a:visited {
- color: #7B1FA2;
-}
-*/
-.nav-links a {
- color: inherit;
- text-decoration: none;
- margin: 50px 0 0 50px;
- display: inline-block;
-}
-
-
-@media screen and (max-width: 1035px){
- .nav-links {
- font-size: 16px;
- }
-}
-
-.nav-links{
- line-height: 20px;
- -webkit-font-smoothing: antialiased;
- font-weight: 300;
- letter-spacing: 0.1px;
- box-sizing: border-box;
- margin: 8px;
- width: calc(66.6666666667% - 16px);
- font-size: 20px;
- text-transform: uppercase;
- font-family: "Google Sans";
- color: #4a4a4a;
- text-align: right;
-}
-
diff --git a/spaces/mfkeles/Track-Anything/app_test.py b/spaces/mfkeles/Track-Anything/app_test.py
deleted file mode 100644
index cd10fe77cec552dffba84c6516ec33a6622b6c38..0000000000000000000000000000000000000000
--- a/spaces/mfkeles/Track-Anything/app_test.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# import gradio as gr
-
-# def update_iframe(slider_value):
-# return f'''
-#
-#
-# '''
-
-# iface = gr.Interface(
-# fn=update_iframe,
-# inputs=gr.inputs.Slider(minimum=0, maximum=100, step=1, default=50),
-# outputs=gr.outputs.HTML(),
-# allow_flagging=False,
-# )
-
-# iface.launch(server_name='0.0.0.0', server_port=12212)
-
-import gradio as gr
-
-
-def change_mask(drop):
- return gr.update(choices=["hello", "kitty"])
-
-with gr.Blocks() as iface:
- drop = gr.Dropdown(
- choices=["cat", "dog", "bird"], label="Animal", info="Will add more animals later!"
- )
- radio = gr.Radio(["park", "zoo", "road"], label="Location", info="Where did they go?")
- multi_drop = gr.Dropdown(
- ["ran", "swam", "ate", "slept"], value=["swam", "slept"], multiselect=True, label="Activity", info="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl."
- )
-
- multi_drop.change(
- fn=change_mask,
- inputs = multi_drop,
- outputs=multi_drop
- )
-
-iface.launch(server_name='0.0.0.0', server_port=1223)
\ No newline at end of file
diff --git a/spaces/mfoud2023/Alhareq/Dockerfile b/spaces/mfoud2023/Alhareq/Dockerfile
deleted file mode 100644
index 008854d1849465cd12a48eec2341225dedd854d1..0000000000000000000000000000000000000000
--- a/spaces/mfoud2023/Alhareq/Dockerfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Stage 1: Clone the repository and set up the environment
-FROM alpine:latest AS build
-
-# Install Git
-RUN apk add --no-cache git
-
-# Get the code
-RUN git clone --depth 1 https://github.com/supabase/supabase
-
-# Go to the docker folder
-WORKDIR /supabase/docker
-
-# Copy the fake environment variables
-RUN cp .env.example .env
-
-# Stage 2: Build the final image for running Docker Compose
-FROM docker/compose:latest AS final
-
-# Copy the repository and environment files from the previous stage
-COPY --from=build /supabase /supabase
-
-# Set the working directory
-WORKDIR /supabase/docker
-
-# Start Docker Compose
-CMD ["docker-compose", "up"]
diff --git a/spaces/mfrashad/CharacterGAN/netdissect/segmodel/resnext.py b/spaces/mfrashad/CharacterGAN/netdissect/segmodel/resnext.py
deleted file mode 100644
index cdbb7461a6c8eb126717967cdca5d5ce392aecea..0000000000000000000000000000000000000000
--- a/spaces/mfrashad/CharacterGAN/netdissect/segmodel/resnext.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import os
-import sys
-import torch
-import torch.nn as nn
-import math
-try:
- from lib.nn import SynchronizedBatchNorm2d
-except ImportError:
- from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
-try:
- from urllib import urlretrieve
-except ImportError:
- from urllib.request import urlretrieve
-
-
-__all__ = ['ResNeXt', 'resnext101'] # support resnext 101
-
-
-model_urls = {
- #'resnext50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext50-imagenet.pth',
- 'resnext101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext101-imagenet.pth'
-}
-
-
-def conv3x3(in_planes, out_planes, stride=1):
- "3x3 convolution with padding"
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
-
-
-class GroupBottleneck(nn.Module):
- expansion = 2
-
- def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None):
- super(GroupBottleneck, self).__init__()
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
- self.bn1 = SynchronizedBatchNorm2d(planes)
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
- padding=1, groups=groups, bias=False)
- self.bn2 = SynchronizedBatchNorm2d(planes)
- self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False)
- self.bn3 = SynchronizedBatchNorm2d(planes * 2)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
- out = self.relu(out)
-
- out = self.conv3(out)
- out = self.bn3(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class ResNeXt(nn.Module):
-
- def __init__(self, block, layers, groups=32, num_classes=1000):
- self.inplanes = 128
- super(ResNeXt, self).__init__()
- self.conv1 = conv3x3(3, 64, stride=2)
- self.bn1 = SynchronizedBatchNorm2d(64)
- self.relu1 = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(64, 64)
- self.bn2 = SynchronizedBatchNorm2d(64)
- self.relu2 = nn.ReLU(inplace=True)
- self.conv3 = conv3x3(64, 128)
- self.bn3 = SynchronizedBatchNorm2d(128)
- self.relu3 = nn.ReLU(inplace=True)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
- self.layer1 = self._make_layer(block, 128, layers[0], groups=groups)
- self.layer2 = self._make_layer(block, 256, layers[1], stride=2, groups=groups)
- self.layer3 = self._make_layer(block, 512, layers[2], stride=2, groups=groups)
- self.layer4 = self._make_layer(block, 1024, layers[3], stride=2, groups=groups)
- self.avgpool = nn.AvgPool2d(7, stride=1)
- self.fc = nn.Linear(1024 * block.expansion, num_classes)
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups
- m.weight.data.normal_(0, math.sqrt(2. / n))
- elif isinstance(m, SynchronizedBatchNorm2d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
-
- def _make_layer(self, block, planes, blocks, stride=1, groups=1):
- downsample = None
- if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- nn.Conv2d(self.inplanes, planes * block.expansion,
- kernel_size=1, stride=stride, bias=False),
- SynchronizedBatchNorm2d(planes * block.expansion),
- )
-
- layers = []
- layers.append(block(self.inplanes, planes, stride, groups, downsample))
- self.inplanes = planes * block.expansion
- for i in range(1, blocks):
- layers.append(block(self.inplanes, planes, groups=groups))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
-
- x = self.avgpool(x)
- x = x.view(x.size(0), -1)
- x = self.fc(x)
-
- return x
-
-
-'''
-def resnext50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNeXt(GroupBottleneck, [3, 4, 6, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnext50']), strict=False)
- return model
-'''
-
-
-def resnext101(pretrained=False, **kwargs):
- """Constructs a ResNet-101 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNeXt(GroupBottleneck, [3, 4, 23, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnext101']), strict=False)
- return model
-
-
-# def resnext152(pretrained=False, **kwargs):
-# """Constructs a ResNeXt-152 model.
-#
-# Args:
-# pretrained (bool): If True, returns a model pre-trained on Places
-# """
-# model = ResNeXt(GroupBottleneck, [3, 8, 36, 3], **kwargs)
-# if pretrained:
-# model.load_state_dict(load_url(model_urls['resnext152']))
-# return model
-
-
-def load_url(url, model_dir='./pretrained', map_location=None):
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- filename = url.split('/')[-1]
- cached_file = os.path.join(model_dir, filename)
- if not os.path.exists(cached_file):
- sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
- urlretrieve(url, cached_file)
- return torch.load(cached_file, map_location=map_location)
diff --git a/spaces/mithril-security/blind_chat/postcss.config.js b/spaces/mithril-security/blind_chat/postcss.config.js
deleted file mode 100644
index 7b75c83aff1c05e0e0e315638e07a22314603d4d..0000000000000000000000000000000000000000
--- a/spaces/mithril-security/blind_chat/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-export default {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-};
diff --git a/spaces/mithril-security/blind_chat/src/lib/types/Settings.ts b/spaces/mithril-security/blind_chat/src/lib/types/Settings.ts
deleted file mode 100644
index b14b45e07ae9356f98a87efe6fe11a603eea0774..0000000000000000000000000000000000000000
--- a/spaces/mithril-security/blind_chat/src/lib/types/Settings.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-import { defaultModel } from "$lib/server/models";
-import type { Timestamps } from "./Timestamps";
-import type { User } from "./User";
-
-export interface Settings extends Timestamps {
- userId?: User["_id"];
- sessionId?: string;
-
- /**
- * Note: Only conversations with this setting explicitly set to true should be shared.
- *
- * This setting is explicitly set to true when users accept the ethics modal.
- * */
- shareConversationsWithModelAuthors: boolean;
- ethicsModalAcceptedAt: Date | null;
- activeModel: string;
-
- // model name and system prompts
- customPrompts?: Record<string, string>;
-}
-
-// TODO: move this to a constant file along with other constants
-export const DEFAULT_SETTINGS = {
- shareConversationsWithModelAuthors: true,
- activeModel: defaultModel.id,
-};
diff --git a/spaces/mixcard/image-2-text-largecoco/app.py b/spaces/mixcard/image-2-text-largecoco/app.py
deleted file mode 100644
index 8074f2754b62740dff6eb3f52a13c1f974ca8358..0000000000000000000000000000000000000000
--- a/spaces/mixcard/image-2-text-largecoco/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/microsoft/git-large-coco").launch()
\ No newline at end of file
diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/utils/events.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/utils/events.py
deleted file mode 100644
index cbe82ce80a7110a1018167763ba3adc90f58faa0..0000000000000000000000000000000000000000
--- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/utils/events.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Copyright (c) Meta Platforms, Inc. All Rights Reserved
-
-import os
-import wandb
-from detectron2.utils import comm
-from detectron2.utils.events import EventWriter, get_event_storage
-
-
-def setup_wandb(cfg, args):
- if comm.is_main_process():
- init_args = {
- k.lower(): v
- for k, v in cfg.WANDB.items()
- if isinstance(k, str) and k not in ["config", "name"]
- }
- # only include most related part to avoid too big table
- # TODO: add configurable params to select which part of `cfg` should be saved in config
- if "config_exclude_keys" in init_args:
- init_args["config"] = cfg
- init_args["config"]["cfg_file"] = args.config_file
- else:
- init_args["config"] = {
- "model": cfg.MODEL,
- "solver": cfg.SOLVER,
- "cfg_file": args.config_file,
- }
- if ("name" not in init_args) or (init_args["name"] is None):
- init_args["name"] = os.path.basename(args.config_file)
- wandb.init(**init_args)
-
-
-class BaseRule(object):
- def __call__(self, target):
- return target
-
-
-class IsIn(BaseRule):
- def __init__(self, keyword: str):
- self.keyword = keyword
-
- def __call__(self, target):
- return self.keyword in target
-
-
-class Prefix(BaseRule):
- def __init__(self, keyword: str):
- self.keyword = keyword
-
- def __call__(self, target):
- return "/".join([self.keyword, target])
-
-
-class WandbWriter(EventWriter):
- """
- Write all scalars to Weights & Biases.
- """
-
- def __init__(self):
- """
- Tracks the last write iteration and the rules used to group scalar names before logging them to the active wandb run.
- """
- self._last_write = -1
- self._group_rules = [
- (IsIn("/"), BaseRule()),
- (IsIn("loss"), Prefix("train")),
- ]
-
- def write(self):
-
- storage = get_event_storage()
-
- def _group_name(scalar_name):
- for (rule, op) in self._group_rules:
- if rule(scalar_name):
- return op(scalar_name)
- return scalar_name
-
- stats = {
- _group_name(name): scalars[0]
- for name, scalars in storage.latest().items()
- if scalars[1] > self._last_write
- }
- if len(stats) > 0:
- self._last_write = max([v[1] for k, v in storage.latest().items()])
-
- # storage.put_{image,histogram} is only meant to be used by
- # tensorboard writer. So we access its internal fields directly from here.
- if len(storage._vis_data) >= 1:
- stats["image"] = [
- wandb.Image(img, caption=img_name)
- for img_name, img, step_num in storage._vis_data
- ]
- # Storage stores all image data and rely on this writer to clear them.
- # As a result it assumes only one writer will use its image data.
- # An alternative design is to let storage store limited recent
- # data (e.g. only the most recent image) that all writers can access.
- # In that case a writer may not see all image data if its period is long.
- storage.clear_images()
-
- if len(storage._histograms) >= 1:
-
- def create_bar(tag, bucket_limits, bucket_counts, **kwargs):
- data = [
- [label, val] for (label, val) in zip(bucket_limits, bucket_counts)
- ]
- table = wandb.Table(data=data, columns=["label", "value"])
- return wandb.plot.bar(table, "label", "value", title=tag)
-
- stats["hist"] = [create_bar(**params) for params in storage._histograms]
-
- storage.clear_histograms()
-
- if len(stats) == 0:
- return
- wandb.log(stats, step=storage.iter)
-
- def close(self):
- wandb.finish()
diff --git a/spaces/mshukor/UnIVAL/fairseq/tests/test_token_block_dataset.py b/spaces/mshukor/UnIVAL/fairseq/tests/test_token_block_dataset.py
deleted file mode 100644
index c4d7b76dcd55fe7869dbb1fa188f7b36fb639bda..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/tests/test_token_block_dataset.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-
-import tests.utils as test_utils
-import torch
-from fairseq.data import TokenBlockDataset
-
-
-class TestTokenBlockDataset(unittest.TestCase):
- def _build_dataset(self, data, **kwargs):
- sizes = [len(x) for x in data]
- underlying_ds = test_utils.TestDataset(data)
- return TokenBlockDataset(underlying_ds, sizes, **kwargs)
-
- def test_eos_break_mode(self):
- data = [
- torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
- torch.tensor([1], dtype=torch.long),
- torch.tensor([8, 7, 6, 1], dtype=torch.long),
- ]
- ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
- self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
- self.assertEqual(ds[1].tolist(), [1])
- self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
-
- data = [
- torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
- torch.tensor([8, 7, 6, 1], dtype=torch.long),
- torch.tensor([1], dtype=torch.long),
- ]
- ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
- self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
- self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
- self.assertEqual(ds[2].tolist(), [1])
-
- def test_block_break_mode(self):
- data = [
- torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
- torch.tensor([8, 7, 6, 1], dtype=torch.long),
- torch.tensor([9, 1], dtype=torch.long),
- ]
- ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
- self.assertEqual(ds[0].tolist(), [5, 4, 3])
- self.assertEqual(ds[1].tolist(), [2, 1, 8])
- self.assertEqual(ds[2].tolist(), [7, 6, 1])
- self.assertEqual(ds[3].tolist(), [9, 1])
-
- def test_complete_break_mode(self):
- data = [
- torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
- torch.tensor([8, 7, 6, 1], dtype=torch.long),
- torch.tensor([9, 1], dtype=torch.long),
- ]
- ds = self._build_dataset(
- data, block_size=6, pad=0, eos=1, break_mode="complete"
- )
- self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
- self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
-
- data = [
- torch.tensor([4, 3, 2, 1], dtype=torch.long),
- torch.tensor([5, 1], dtype=torch.long),
- torch.tensor([1], dtype=torch.long),
- torch.tensor([6, 1], dtype=torch.long),
- ]
- ds = self._build_dataset(
- data, block_size=3, pad=0, eos=1, break_mode="complete"
- )
- self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
- self.assertEqual(ds[1].tolist(), [5, 1, 1])
- self.assertEqual(ds[2].tolist(), [6, 1])
-
- def test_4billion_tokens(self):
- """Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745"""
- data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000
- ds = self._build_dataset(
- data, block_size=6, pad=0, eos=1, break_mode="complete"
- )
- ds[-1] # __getitem__ works
- start, end = ds.slice_indices[-1]
- assert end > 4294967295 # data must be sufficiently large to overflow uint32
- assert not isinstance(
- end + 1, float
- ) # this would also raise, since np.uint64(1) + 1 => 2.0
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/train.py b/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/train.py
deleted file mode 100644
index 60be962d447f8a45ed57aec398369b3331e7165a..0000000000000000000000000000000000000000
--- a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/train.py
+++ /dev/null
@@ -1,643 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Train a YOLOv5 model on a custom dataset.
-
-Models and datasets download automatically from the latest YOLOv5 release.
-Models: https://github.com/ultralytics/yolov5/tree/master/models
-Datasets: https://github.com/ultralytics/yolov5/tree/master/data
-Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
-
-Usage:
- $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED)
- $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch
-"""
-
-import argparse
-import math
-import os
-import random
-import sys
-import time
-from copy import deepcopy
-from datetime import datetime
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.distributed as dist
-import torch.nn as nn
-import yaml
-from torch.cuda import amp
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.optim import SGD, Adam, AdamW, lr_scheduler
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-import val # for end-of-epoch mAP
-from models.experimental import attempt_load
-from models.yolo import Model
-from utils.autoanchor import check_anchors
-from utils.autobatch import check_train_batch_size
-from utils.callbacks import Callbacks
-from utils.datasets import create_dataloader
-from utils.downloads import attempt_download
-from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
- check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
- intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
- print_args, print_mutation, strip_optimizer)
-from utils.loggers import Loggers
-from utils.loggers.wandb.wandb_utils import check_wandb_resume
-from utils.loss import ComputeLoss
-from utils.metrics import fitness
-from utils.plots import plot_evolve, plot_labels
-from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first
-
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv('RANK', -1))
-WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
-
-
-def train(hyp, # path/to/hyp.yaml or hyp dictionary
- opt,
- device,
- callbacks
- ):
- save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
- Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
- opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
-
- # Directories
- w = save_dir / 'weights' # weights dir
- (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
- last, best = w / 'last.pt', w / 'best.pt'
-
- # Hyperparameters
- if isinstance(hyp, str):
- with open(hyp, errors='ignore') as f:
- hyp = yaml.safe_load(f) # load hyps dict
- LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
-
- # Save run settings
- if not evolve:
- with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.safe_dump(hyp, f, sort_keys=False)
- with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.safe_dump(vars(opt), f, sort_keys=False)
-
- # Loggers
- data_dict = None
- if RANK in [-1, 0]:
- loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
- if loggers.wandb:
- data_dict = loggers.wandb.data_dict
- if resume:
- weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
-
- # Register actions
- for k in methods(loggers):
- callbacks.register_action(k, callback=getattr(loggers, k))
-
- # Config
- plots = not evolve # create plots
- cuda = device.type != 'cpu'
- init_seeds(1 + RANK)
- with torch_distributed_zero_first(LOCAL_RANK):
- data_dict = data_dict or check_dataset(data) # check if None
- train_path, val_path = data_dict['train'], data_dict['val']
- nc = 1 if single_cls else int(data_dict['nc']) # number of classes
- names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
- assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
- is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
-
- # Model
- check_suffix(weights, '.pt') # check weights
- pretrained = weights.endswith('.pt')
- if pretrained:
- with torch_distributed_zero_first(LOCAL_RANK):
- weights = attempt_download(weights) # download if not found locally
- ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
- model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
- exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
- csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
- csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
- model.load_state_dict(csd, strict=False) # load
- LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
- else:
- model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
-
- # Freeze
- freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
- for k, v in model.named_parameters():
- v.requires_grad = True # train all layers
- if any(x in k for x in freeze):
- LOGGER.info(f'freezing {k}')
- v.requires_grad = False
-
- # Image size
- gs = max(int(model.stride.max()), 32) # grid size (max stride)
- imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
-
- # Batch size
- if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
- batch_size = check_train_batch_size(model, imgsz)
- loggers.on_params_update({"batch_size": batch_size})
-
- # Optimizer
- nbs = 64 # nominal batch size
- accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
- hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
- LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
-
- g0, g1, g2 = [], [], [] # optimizer parameter groups
- for v in model.modules():
- if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias
- g2.append(v.bias)
- if isinstance(v, nn.BatchNorm2d): # weight (no decay)
- g0.append(v.weight)
- elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay)
- g1.append(v.weight)
-
- if opt.optimizer == 'Adam':
- optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
- elif opt.optimizer == 'AdamW':
- optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
- else:
- optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
-
- optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay
- optimizer.add_param_group({'params': g2}) # add g2 (biases)
- LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
- f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias")
- del g0, g1, g2
-
- # Scheduler
- if opt.cos_lr:
- lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
- else:
- lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
- scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
-
- # EMA
- ema = ModelEMA(model) if RANK in [-1, 0] else None
-
- # Resume
- start_epoch, best_fitness = 0, 0.0
- if pretrained:
- # Optimizer
- if ckpt['optimizer'] is not None:
- optimizer.load_state_dict(ckpt['optimizer'])
- best_fitness = ckpt['best_fitness']
-
- # EMA
- if ema and ckpt.get('ema'):
- ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
- ema.updates = ckpt['updates']
-
- # Epochs
- start_epoch = ckpt['epoch'] + 1
- if resume:
- assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
- if epochs < start_epoch:
- LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
- epochs += ckpt['epoch'] # finetune additional epochs
-
- del ckpt, csd
-
- # DP mode
- if cuda and RANK == -1 and torch.cuda.device_count() > 1:
- LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
- 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
- model = torch.nn.DataParallel(model)
-
- # SyncBatchNorm
- if opt.sync_bn and cuda and RANK != -1:
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
- LOGGER.info('Using SyncBatchNorm()')
-
- # Trainloader
- train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
- hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache,
- rect=opt.rect, rank=LOCAL_RANK, workers=workers,
- image_weights=opt.image_weights, quad=opt.quad,
- prefix=colorstr('train: '), shuffle=True)
- mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class
- nb = len(train_loader) # number of batches
- assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
-
- # Process 0
- if RANK in [-1, 0]:
- val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
- hyp=hyp, cache=None if noval else opt.cache,
- rect=True, rank=-1, workers=workers * 2, pad=0.5,
- prefix=colorstr('val: '))[0]
-
- if not resume:
- labels = np.concatenate(dataset.labels, 0)
- # c = torch.tensor(labels[:, 0]) # classes
- # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
- # model._initialize_biases(cf.to(device))
- if plots:
- plot_labels(labels, names, save_dir)
-
- # Anchors
- if not opt.noautoanchor:
- check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
- model.half().float() # pre-reduce anchor precision
-
- callbacks.run('on_pretrain_routine_end')
-
- # DDP mode
- if cuda and RANK != -1:
- model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
-
- # Model attributes
- nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
- hyp['box'] *= 3 / nl # scale to layers
- hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
- hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
- hyp['label_smoothing'] = opt.label_smoothing
- model.nc = nc # attach number of classes to model
- model.hyp = hyp # attach hyperparameters to model
- model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
- model.names = names
-
- # Start training
- t0 = time.time()
- nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
- # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
- last_opt_step = -1
- maps = np.zeros(nc) # mAP per class
- results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
- scheduler.last_epoch = start_epoch - 1 # do not move
- scaler = amp.GradScaler(enabled=cuda)
- stopper = EarlyStopping(patience=opt.patience)
- compute_loss = ComputeLoss(model) # init loss class
- LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
- f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
- f"Logging results to {colorstr('bold', save_dir)}\n"
- f'Starting training for {epochs} epochs...')
- for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
- model.train()
-
- # Update image weights (optional, single-GPU only)
- if opt.image_weights:
- cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
- iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
- dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
-
- # Update mosaic border (optional)
- # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
- # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
-
- mloss = torch.zeros(3, device=device) # mean losses
- if RANK != -1:
- train_loader.sampler.set_epoch(epoch)
- pbar = enumerate(train_loader)
- LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
- if RANK in [-1, 0]:
- pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
- optimizer.zero_grad()
- for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
- ni = i + nb * epoch # number integrated batches (since train start)
- imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
-
- # Warmup
- if ni <= nw:
- xi = [0, nw] # x interp
- # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
- accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
- for j, x in enumerate(optimizer.param_groups):
- # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
- x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
- if 'momentum' in x:
- x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
-
- # Multi-scale
- if opt.multi_scale:
- sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
- sf = sz / max(imgs.shape[2:]) # scale factor
- if sf != 1:
- ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
- imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
-
- # Forward
- with amp.autocast(enabled=cuda):
- pred = model(imgs) # forward
- loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
- if RANK != -1:
- loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
- if opt.quad:
- loss *= 4.
-
- # Backward
- scaler.scale(loss).backward()
-
- # Optimize
- if ni - last_opt_step >= accumulate:
- scaler.step(optimizer) # optimizer.step
- scaler.update()
- optimizer.zero_grad()
- if ema:
- ema.update(model)
- last_opt_step = ni
-
- # Log
- if RANK in [-1, 0]:
- mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
- mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
- pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
- f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
- callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)
- if callbacks.stop_training:
- return
- # end batch ------------------------------------------------------------------------------------------------
-
- # Scheduler
- lr = [x['lr'] for x in optimizer.param_groups] # for loggers
- scheduler.step()
-
- if RANK in [-1, 0]:
- # mAP
- callbacks.run('on_train_epoch_end', epoch=epoch)
- ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
- final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
- if not noval or final_epoch: # Calculate mAP
- results, maps, _ = val.run(data_dict,
- batch_size=batch_size // WORLD_SIZE * 2,
- imgsz=imgsz,
- model=ema.ema,
- single_cls=single_cls,
- dataloader=val_loader,
- save_dir=save_dir,
- plots=False,
- callbacks=callbacks,
- compute_loss=compute_loss)
-
- # Update best mAP
- fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
- if fi > best_fitness:
- best_fitness = fi
- log_vals = list(mloss) + list(results) + lr
- callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
-
- # Save model
- if (not nosave) or (final_epoch and not evolve): # if save
- ckpt = {'epoch': epoch,
- 'best_fitness': best_fitness,
- 'model': deepcopy(de_parallel(model)).half(),
- 'ema': deepcopy(ema.ema).half(),
- 'updates': ema.updates,
- 'optimizer': optimizer.state_dict(),
- 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
- 'date': datetime.now().isoformat()}
-
- # Save last, best and delete
- torch.save(ckpt, last)
- if best_fitness == fi:
- torch.save(ckpt, best)
- if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
- torch.save(ckpt, w / f'epoch{epoch}.pt')
- del ckpt
- callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
-
- # Stop Single-GPU
- if RANK == -1 and stopper(epoch=epoch, fitness=fi):
- break
-
- # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
- # stop = stopper(epoch=epoch, fitness=fi)
- # if RANK == 0:
- # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks
-
- # Stop DDP
- # with torch_distributed_zero_first(RANK):
- # if stop:
- # break # must break all DDP ranks
-
- # end epoch ----------------------------------------------------------------------------------------------------
- # end training -----------------------------------------------------------------------------------------------------
- if RANK in [-1, 0]:
- LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
- for f in last, best:
- if f.exists():
- strip_optimizer(f) # strip optimizers
- if f is best:
- LOGGER.info(f'\nValidating {f}...')
- results, _, _ = val.run(data_dict,
- batch_size=batch_size // WORLD_SIZE * 2,
- imgsz=imgsz,
- model=attempt_load(f, device).half(),
- iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65
- single_cls=single_cls,
- dataloader=val_loader,
- save_dir=save_dir,
- save_json=is_coco,
- verbose=True,
- plots=True,
- callbacks=callbacks,
- compute_loss=compute_loss) # val best model with plots
- if is_coco:
- callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
-
- callbacks.run('on_train_end', last, best, plots, epoch, results)
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
- torch.cuda.empty_cache()
- return results
-
-
-def parse_opt(known=False):
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
- parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
- parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
- parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
- parser.add_argument('--epochs', type=int, default=300)
- parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
- parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
- parser.add_argument('--rect', action='store_true', help='rectangular training')
- parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
- parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
- parser.add_argument('--noval', action='store_true', help='only validate final epoch')
- parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
- parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
- parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
- parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
- parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
- parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
- parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
- parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
- parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
- parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
- parser.add_argument('--name', default='exp', help='save to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
- parser.add_argument('--quad', action='store_true', help='quad dataloader')
- parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
- parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
- parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
- parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
- parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
- parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
-
- # Weights & Biases arguments
- parser.add_argument('--entity', default=None, help='W&B: Entity')
- parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
- parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
- parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
-
- opt = parser.parse_known_args()[0] if known else parser.parse_args()
- return opt
-
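-# Illustrative CLI invocation (hypothetical, simply using the defaults declared in parse_opt above):
-#   python train.py --data coco128.yaml --weights yolov5s.pt --img 640 --batch-size 16 --epochs 300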
-
-def main(opt, callbacks=Callbacks()):
- # Checks
- if RANK in [-1, 0]:
- print_args(FILE.stem, opt)
- check_git_status()
- check_requirements(exclude=['thop'])
-
- # Resume
- if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run
- ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
- assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
- with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
- opt = argparse.Namespace(**yaml.safe_load(f)) # replace
- opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate
- LOGGER.info(f'Resuming training from {ckpt}')
- else:
- opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
- check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
- assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
- if opt.evolve:
- if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve
- opt.project = str(ROOT / 'runs/evolve')
- opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
- opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
-
- # DDP mode
- device = select_device(opt.device, batch_size=opt.batch_size)
- if LOCAL_RANK != -1:
- msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
- assert not opt.image_weights, f'--image-weights {msg}'
- assert not opt.evolve, f'--evolve {msg}'
- assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
- assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
- assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
- torch.cuda.set_device(LOCAL_RANK)
- device = torch.device('cuda', LOCAL_RANK)
- dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
-
- # Train
- if not opt.evolve:
- train(opt.hyp, opt, device, callbacks)
- if WORLD_SIZE > 1 and RANK == 0:
- LOGGER.info('Destroying process group... ')
- dist.destroy_process_group()
-
- # Evolve hyperparameters (optional)
- else:
- # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
- meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
- 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
- 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
- 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
- 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
- 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
- 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
- 'box': (1, 0.02, 0.2), # box loss gain
- 'cls': (1, 0.2, 4.0), # cls loss gain
- 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
- 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
- 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
- 'iou_t': (0, 0.1, 0.7), # IoU training threshold
- 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
- 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
- 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
- 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
- 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
- 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
- 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
- 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
- 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
- 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
- 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
- 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
- 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
-                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
- 'mixup': (1, 0.0, 1.0), # image mixup (probability)
- 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
-
- with open(opt.hyp, errors='ignore') as f:
- hyp = yaml.safe_load(f) # load hyps dict
- if 'anchors' not in hyp: # anchors commented in hyp.yaml
- hyp['anchors'] = 3
- opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
- # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
- evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
- if opt.bucket:
- os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists
-
- for _ in range(opt.evolve): # generations to evolve
- if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
- # Select parent(s)
- parent = 'single' # parent selection method: 'single' or 'weighted'
- x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
- n = min(5, len(x)) # number of previous results to consider
- x = x[np.argsort(-fitness(x))][:n] # top n mutations
- w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
- if parent == 'single' or len(x) == 1:
- # x = x[random.randint(0, n - 1)] # random selection
- x = x[random.choices(range(n), weights=w)[0]] # weighted selection
- elif parent == 'weighted':
- x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
-
- # Mutate
- mp, s = 0.8, 0.2 # mutation probability, sigma
- npr = np.random
- npr.seed(int(time.time()))
- g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
- ng = len(meta)
- v = np.ones(ng)
- while all(v == 1): # mutate until a change occurs (prevent duplicates)
- v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
- for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
-                    hyp[k] = float(x[i + 7] * v[i])  # mutate (first 7 columns of evolve.csv are result metrics; hyperparameters start at column 7)
-
- # Constrain to limits
- for k, v in meta.items():
- hyp[k] = max(hyp[k], v[1]) # lower limit
- hyp[k] = min(hyp[k], v[2]) # upper limit
- hyp[k] = round(hyp[k], 5) # significant digits
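-            # Net effect of the mutation block above: with probability `mp` each hyperparameter is
-            # scaled by a random factor of spread `s` clipped to [0.3, 3.0], then clamped to the
-            # [lower, upper] limits declared in `meta`.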
-
- # Train mutation
- results = train(hyp.copy(), opt, device, callbacks)
- callbacks = Callbacks()
- # Write mutation results
- print_mutation(results, hyp.copy(), save_dir, opt.bucket)
-
- # Plot results
- plot_evolve(evolve_csv)
-        LOGGER.info(f'Hyperparameter evolution finished after {opt.evolve} generations\n'
- f"Results saved to {colorstr('bold', save_dir)}\n"
- f'Usage example: $ python train.py --hyp {evolve_yaml}')
-
-
-def run(**kwargs):
- # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
- opt = parse_opt(True)
- for k, v in kwargs.items():
- setattr(opt, k, v)
- main(opt)
- return opt
-
-
-if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
diff --git a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/prt_util.py b/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/prt_util.py
deleted file mode 100644
index 7eba32fa0b396f420b2e332abbb67135dbc14d6b..0000000000000000000000000000000000000000
--- a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/apps/prt_util.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import os
-import trimesh
-import numpy as np
-import math
-from scipy.special import sph_harm
-import argparse
-from tqdm import tqdm
-
-def factratio(N, D):
- if N >= D:
- prod = 1.0
- for i in range(D+1, N+1):
- prod *= i
- return prod
- else:
- prod = 1.0
- for i in range(N+1, D+1):
- prod *= i
- return 1.0 / prod
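-# factratio(N, D) returns N! / D! without evaluating the full factorials; KVal below uses it for the
-# spherical-harmonic normalization term (L - M)! / (L + M)!.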
-
-def KVal(M, L):
- return math.sqrt(((2 * L + 1) / (4 * math.pi)) * (factratio(L - M, L + M)))
-
-def AssociatedLegendre(M, L, x):
- if M < 0 or M > L or np.max(np.abs(x)) > 1.0:
- return np.zeros_like(x)
-
- pmm = np.ones_like(x)
- if M > 0:
- somx2 = np.sqrt((1.0 + x) * (1.0 - x))
- fact = 1.0
- for i in range(1, M+1):
- pmm = -pmm * fact * somx2
- fact = fact + 2
-
- if L == M:
- return pmm
- else:
- pmmp1 = x * (2 * M + 1) * pmm
- if L == M+1:
- return pmmp1
- else:
- pll = np.zeros_like(x)
- for i in range(M+2, L+1):
- pll = (x * (2 * i - 1) * pmmp1 - (i + M - 1) * pmm) / (i - M)
- pmm = pmmp1
- pmmp1 = pll
- return pll
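-# AssociatedLegendre(M, L, x) evaluates the associated Legendre polynomial P_L^M(x) with the usual
-# recurrence: build P_M^M, lift it to P_{M+1}^M, then iterate
-# P_l^M = ((2l - 1) x P_{l-1}^M - (l + M - 1) P_{l-2}^M) / (l - M) up to l = L.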
-
-def SphericalHarmonic(M, L, theta, phi):
- if M > 0:
- return math.sqrt(2.0) * KVal(M, L) * np.cos(M * phi) * AssociatedLegendre(M, L, np.cos(theta))
- elif M < 0:
- return math.sqrt(2.0) * KVal(-M, L) * np.sin(-M * phi) * AssociatedLegendre(-M, L, np.cos(theta))
- else:
- return KVal(0, L) * AssociatedLegendre(0, L, np.cos(theta))
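-# The branches above implement the real-valued spherical harmonics basis:
-#   m > 0: sqrt(2) K(m, l) cos(m phi) P_l^m(cos theta)
-#   m < 0: sqrt(2) K(|m|, l) sin(|m| phi) P_l^|m|(cos theta)
-#   m = 0: K(0, l) P_l^0(cos theta)
-# with K the normalization constant computed by KVal.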
-
-def save_obj(mesh_path, verts):
-    with open(mesh_path, 'w') as file:
-        for v in verts:
-            file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
-
-def sampleSphericalDirections(n):
- xv = np.random.rand(n,n)
- yv = np.random.rand(n,n)
- theta = np.arccos(1-2 * xv)
- phi = 2.0 * math.pi * yv
-
- phi = phi.reshape(-1)
- theta = theta.reshape(-1)
-
- vx = -np.sin(theta) * np.cos(phi)
- vy = -np.sin(theta) * np.sin(phi)
- vz = np.cos(theta)
- return np.stack([vx, vy, vz], 1), phi, theta
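-# Sampling note: theta = arccos(1 - 2u) with u ~ Uniform(0, 1) makes cos(theta) uniform on [-1, 1],
-# so together with phi ~ Uniform(0, 2*pi) the returned directions are distributed uniformly over the
-# unit sphere instead of clustering at the poles.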
-
-def getSHCoeffs(order, phi, theta):
- shs = []
- for n in range(0, order+1):
- for m in range(-n,n+1):
- s = SphericalHarmonic(m, n, theta, phi)
- shs.append(s)
-
- return np.stack(shs, 1)
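-# getSHCoeffs returns an array of shape (num_directions, (order + 1)**2): one row per sampled
-# direction, one column per SH basis function, ordered by band l = 0..order and m = -l..l.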
-
-def computePRT(mesh_path, n, order):
- mesh = trimesh.load(mesh_path, process=False)
- vectors_orig, phi, theta = sampleSphericalDirections(n)
- SH_orig = getSHCoeffs(order, phi, theta)
-
- w = 4.0 * math.pi / (n*n)
-
- origins = mesh.vertices
- normals = mesh.vertex_normals
- n_v = origins.shape[0]
-
- origins = np.repeat(origins[:,None], n, axis=1).reshape(-1,3)
- normals = np.repeat(normals[:,None], n, axis=1).reshape(-1,3)
- PRT_all = None
- for i in tqdm(range(n)):
- SH = np.repeat(SH_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,SH_orig.shape[1])
- vectors = np.repeat(vectors_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,3)
-
- dots = (vectors * normals).sum(1)
- front = (dots > 0.0)
-
- delta = 1e-3*min(mesh.bounding_box.extents)
- hits = mesh.ray.intersects_any(origins + delta * normals, vectors)
- nohits = np.logical_and(front, np.logical_not(hits))
-
-        PRT = (nohits.astype(np.float64) * dots)[:,None] * SH
-
- if PRT_all is not None:
- PRT_all += (PRT.reshape(-1, n, SH.shape[1]).sum(1))
- else:
- PRT_all = (PRT.reshape(-1, n, SH.shape[1]).sum(1))
-
- PRT = w * PRT_all
-
- # NOTE: trimesh sometimes break the original vertex order, but topology will not change.
- # when loading PRT in other program, use the triangle list from trimesh.
- return PRT, mesh.faces
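-# computePRT above is a Monte Carlo estimate of the diffuse PRT coefficients: for every vertex,
-# PRT ~= (4*pi / n^2) * sum over the n^2 sampled directions of
-#        visibility(direction) * max(dot(normal, direction), 0) * SH(direction),
-# where visibility is 1 only when the ray, offset slightly along the normal, escapes the mesh.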
-
-def testPRT(dir_path, n=40):
- if dir_path[-1] == '/':
- dir_path = dir_path[:-1]
- sub_name = dir_path.split('/')[-1][:-4]
- obj_path = os.path.join(dir_path, sub_name + '_100k.obj')
- os.makedirs(os.path.join(dir_path, 'bounce'), exist_ok=True)
-
- PRT, F = computePRT(obj_path, n, 2)
- np.savetxt(os.path.join(dir_path, 'bounce', 'bounce0.txt'), PRT, fmt='%.8f')
- np.save(os.path.join(dir_path, 'bounce', 'face.npy'), F)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('-i', '--input', type=str, default='/home/shunsuke/Downloads/rp_dennis_posed_004_OBJ')
-    parser.add_argument('-n', '--n_sample', type=int, default=40, help='square root of the number of sample directions; higher is more accurate but slower')
- args = parser.parse_args()
-
- testPRT(args.input)
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arsen Dedic Diskografija.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arsen Dedic Diskografija.md
deleted file mode 100644
index 131189cdd6fa4f0ea284fd0655e804308f8f2203..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Arsen Dedic Diskografija.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
Arsen Dedic Diskografija: A Tribute to the Croatian Chanson Singer
-
Arsen Dedic was a Croatian chanson singer, poet and composer who left a lasting mark on the Croatian music scene. He was born in Sibenik in 1938 and died in Zagreb in 2015. He recorded more than 30 albums, won many music awards and wrote songs for himself and other famous singers. He also published several collections of poetry and translated and adapted songs by renowned artists such as Gino Paoli, Sergio Endrigo and Jacques Brel.
-
In this article, we will present a brief overview of his discography, which spans over five decades and showcases his unique style and voice. We will focus on his studio albums, but also mention some of his compilations, live albums and collaborations.
Arsen Dedic released his first studio album, Covjek kao ja (A Man Like Me), in 1970. It featured songs such as Ne daj se Ines (Don't Give Up Ines), Modra rijeka (Blue River) and O mladosti (About Youth), which became classics of his repertoire. The album was influenced by French chanson and Italian canzone, but also reflected his personal experiences and views.
-
His second album, Arsen 2, came out in 1971 and included songs such as Kuca pored mora (House by the Sea), Djevojka za jedan dan (Girl for One Day) and Pamtim samo sretne dane (I Only Remember Happy Days). The album was more experimental and diverse, incorporating elements of rock, jazz and folk music.
-
In 1973, he released Homo Volans (Flying Man), which was inspired by his fascination with flying and aviation. The album featured songs such as Piloti (Pilots), Letece zvijezde (Flying Stars) and Zagrli me (Hug Me). The album was praised for its originality and creativity.
-
In 1975, he released Vracam se (I'm Coming Back), which was a more intimate and introspective album. It included songs such as Ostavljam te samu (I'm Leaving You Alone), Tvoje njezne godine (Your Tender Years) and Vracam se (I'm Coming Back). The album was dedicated to his wife Gabi Novak, a famous Croatian singer herself.
-
-
In 1976, he released two albums: Otisak autora (Author's Imprint) and Porodicno stablo (Family Tree). The former was a collection of songs he wrote for other singers, such as Gabi Novak, Ibrica Jusic, Zoran Predin and others. The latter was a concept album that explored his family history and roots. It featured songs such as Moj otac i ja (My Father and I), Moj brat i ja (My Brother and I) and Moj djed i ja (My Grandfather and I).
-
In 1977, he collaborated with poet Matija Golob on the album Dedic - Golob, which combined their poetry and music. The album included songs such as Pjesma o jednom petlu (Song About a Rooster), Pjesma o jednoj zeni (Song About a Woman) and Pjesma o jednom psu (Song About a Dog).
-
In 1978, he released Kuca pored mora (House by the Sea), which was a re-recording of his second album with new arrangements by composer Kresimir Oblak. The album featured songs such
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/File72579 Zip [download [2021] Film Kiraz Mevsimi Sub Indo].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/File72579 Zip [download [2021] Film Kiraz Mevsimi Sub Indo].md
deleted file mode 100644
index f1d39125b9c923b1ddd746e79388e0dbf781851c..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/File72579 Zip [download [2021] Film Kiraz Mevsimi Sub Indo].md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
How to Download Kiraz Mevsimi Sub Indo
-
Kiraz Mevsimi is a Turkish drama series that aired on Fox TV from 2014 to 2015. It tells the story of Oyku, an aspiring fashion designer, and Ayaz, a successful architect, who fall in love despite their different backgrounds and personalities. The series is also known as Cherry Season in English, and has been dubbed in Arabic, Urdu, Indonesian, and Italian.
-
If you are interested in watching Kiraz Mevsimi with Indonesian subtitles, you might be wondering how to download it. Here are some steps you can follow:
-
-
1. Go to http://107.152.46.41/kiraz-mevsimi-2018-tluuzz, a website that offers Kiraz Mevsimi episodes with Indonesian subtitles for free[^1^]. You can also find other Turkish dramas on this website.
2. Select the episode you want to watch from the list. You can choose from 59 episodes in total.
3. Click on the play button to start streaming the video. You can adjust the quality and the volume of the video according to your preference.
4. If you want to download the video, click on the download icon at the bottom right corner of the video player. You will be redirected to another page where you can choose the format and the size of the file you want to download.
5. Click on the download button and wait for the file to be downloaded to your device. You might need to enter a captcha code or complete a survey before you can download the file.
6. Enjoy watching Kiraz Mevsimi with Indonesian subtitles!
-
-
Alternatively, you can also watch Kiraz Mevsimi with Indonesian subtitles on https://amara.org/uz/videos/V7U0ciRZRcv4/id/1051072/ [^2^] or https://amara.org/en/videos/tr5g1Uj409Tq/id/1059313/ [^3^], where you can find fan-made subtitles for the first two episodes of the series. You can also contribute to the subtitling project by joining Amara, a platform that allows anyone to create and share subtitles for videos.
-
Kiraz Mevsimi is a romantic comedy that will make you laugh, cry, and swoon over the chemistry between Oyku and Ayaz. If you are looking for a fun and heartwarming series to watch, give Kiraz Mevsimi a try!
-
-
Kiraz Mevsimi is not only a love story between Oyku and Ayaz, but also a story of friendship, family, and dreams. Oyku has a close bond with her mother and her brother Cem, who support her in pursuing her passion for fashion design. She also has a loyal best friend, Seyma, who accompanies her to the interview for a prestigious fashion company. However, things do not go as planned when Oyku accidentally gives her portfolio to Ayaz instead of the company's owner, Mete.
-
Ayaz is a handsome and successful architect who works for his father's company. He is also Mete's best friend and roommate. He has a complicated relationship with his father, who wants him to marry his business partner's daughter, Burcu. Ayaz is not interested in Burcu, but he does not want to disappoint his father either. He is intrigued by Oyku's portfolio, which contains sketches of him and his dog. He decides to hire Oyku as his personal assistant, without telling her the truth about his identity.
-
Oyku and Ayaz start off on the wrong foot, as they constantly bicker and annoy each other. However, they also develop feelings for each other, which they try to deny or hide. Their relationship faces many challenges, such as misunderstandings, jealousy, secrets, and lies. They also have to deal with other people who want to separate them, such as Burcu, Ilker (Oyku's ex-boyfriend), and Sibel (Ayaz's ex-girlfriend). Will Oyku and Ayaz overcome these obstacles and find their happy ending?
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Filza File Manager VERIFIED Cracked Repo Iphone.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Filza File Manager VERIFIED Cracked Repo Iphone.md
deleted file mode 100644
index dfee35870837e75cde6cf29813a8ad3bdb066bae..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Filza File Manager VERIFIED Cracked Repo Iphone.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
How to Download Filza File Manager Cracked Repo for iPhone
-
Filza File Manager is one of the best file managers for iOS devices, allowing you to access and manage files and folders on your iPhone, iPad, or iPod Touch. With Filza, you can view, edit, copy, move, delete, compress, decompress, install, and execute files of various types and formats. You can also connect to cloud services, SMB servers, FTP servers, WebDAV servers, and more.
However, Filza File Manager is a paid app on Cydia, costing $5.99. If you want to get it for free, you need to download a cracked repo that hosts the app. In this article, we will show you how to download Filza File Manager cracked repo for iPhone using a jailbreak tool.
-
What is a Cracked Repo?
-
A cracked repo is a third-party repository that hosts pirated or modified versions of apps and tweaks that are normally paid or restricted on the official sources. Cracked repos are not recommended by the developers or the jailbreak community, as they may contain malware, viruses, or spyware that can harm your device or steal your personal information. They may also cause conflicts or errors with other apps and tweaks on your device.
-
Therefore, we advise you to use cracked repos at your own risk and only download apps and tweaks from trusted sources. We do not condone piracy or illegal activities in any way.
-
How to Download Filza File Manager Cracked Repo for iPhone
-
To download Filza File Manager cracked repo for iPhone, you need to have a jailbroken device running iOS 7 to iOS 16.x. Jailbreaking is the process of removing the software restrictions imposed by Apple on iOS devices, allowing you to install apps and tweaks that are not available on the App Store or Cydia. Jailbreaking also gives you root access to your device's file system, which is required for Filza File Manager to work.
-
There are various jailbreak tools available for different iOS versions and devices. Some of the most popular ones are checkra1n[^1^], unc0ver[^2^], Odyssey[^3^], and Taurine[^4^]. You can choose the one that is compatible with your device and follow the instructions on their websites to jailbreak your device.
-
-
Once you have jailbroken your device, you need to add a cracked repo that hosts Filza File Manager to your package manager. A package manager is an app that allows you to browse, install, update, and uninstall apps and tweaks from various sources or repositories. The most common package managers are Cydia, Sileo, Zebra, and Installer. You can use any of them to add a cracked repo for Filza File Manager.
-
One of the cracked repos that hosts Filza File Manager is https://repo.hackyouriphone.org/. To add this repo to your package manager, follow these steps:
-
-
1. Open your package manager app and tap on the Sources tab.
2. Tap on the Edit button at the top right corner and then tap on the Add button at the top left corner.
3. Type or paste https://repo.hackyouriphone.org/ in the text box and tap on Add Source.
4. Wait for the repo to be added and refreshed.
5. Tap on the Return to Cydia (or Sileo/Zebra/Installer) button at the bottom.
-
-
Now you have added the cracked repo for Filza File Manager to your package manager. To download and install Filza File Manager from this repo, follow these steps:
-
-
1. Tap on the Search tab at the bottom right corner of your package manager app.
2. Type or paste Filza File Manager in the search bar and tap on Search.
3. Tap on the result from HackYouriPhone repo.
4. Tap on the Install (or Get/Queue) button at the top right corner.
5. Tap on Confirm (or Queue/Install) at the top.
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/METAL GEAR SURVIVE Download For Pc [torrent Full] NEW!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/METAL GEAR SURVIVE Download For Pc [torrent Full] NEW!.md
deleted file mode 100644
index 97b368c1651d7d3ff0861c18fa6923c08ac90d6d..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/METAL GEAR SURVIVE Download For Pc [torrent Full] NEW!.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
METAL GEAR SURVIVE Download For PC [Torrent Full]
-
METAL GEAR SURVIVE is a spin-off from the main METAL GEAR SOLID V story that takes place in a strange alternative universe. You are one of the survivors of the attack on Mother Base, and you must fight for your life against hostile creatures and other enemies. You will need to scavenge resources, craft weapons and equipment, and build a base to survive in this harsh environment.
METAL GEAR SURVIVE builds upon METAL GEAR SOLID V's enduring stealth action gameplay while introducing the new elements of exploration and survival to create a bold new experience. You can play solo or co-op with up to four players online. You will also encounter some familiar faces from the METAL GEAR series, such as Ocelot and Miller.
-
If you want to download METAL GEAR SURVIVE for PC, you can use the torrent link below. You will need a torrent client such as uTorrent or BitTorrent to download the game file. After downloading, you will need to install the game and follow the instructions. Make sure your PC meets the minimum system requirements before playing.
METAL GEAR SURVIVE is a thrilling and challenging game that will test your skills and creativity. Download it now and enjoy the ultimate survival experience!
Here are some tips and tricks to help you play METAL GEAR SURVIVE better:
-
-
- Explore the map and collect resources. You will need them to craft items, upgrade your base, and heal yourself. You can also find blueprints and recipes to unlock new items and weapons.
- Manage your hunger, thirst, and oxygen levels. You will need to eat, drink, and breathe regularly to avoid losing health and stamina. You can find food and water in the environment, or grow them in your base. You can also use oxygen tanks to explore the dust areas.
- Use stealth and strategy. You can avoid or distract enemies by using decoys, traps, or noise. You can also use the environment to your advantage, such as hiding behind cover or using high ground. You can also customize your loadout and equipment to suit your playstyle.
- Cooperate with other players. You can join online co-op missions to earn rewards and resources. You can also share items and resources with other players, or help them in combat. You can also communicate with them using gestures or voice chat.
- Enjoy the story and the characters. You will encounter some familiar faces from the METAL GEAR series, such as Ocelot and Miller. You will also learn more about the mysterious wormhole and the origin of the creatures. You will also face some epic boss battles and challenges.
-
-
METAL GEAR SURVIVE is a game that will keep you hooked for hours. Download it now and join the adventure!
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tareekh E Pakistan In Urdu Pdf __LINK__ Downloadl.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tareekh E Pakistan In Urdu Pdf __LINK__ Downloadl.md
deleted file mode 100644
index 0bcd4154d22f13aaa6b5b07871b32d0e7aa101ec..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Tareekh E Pakistan In Urdu Pdf __LINK__ Downloadl.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-
-
Tareekh E Pakistan In Urdu Pdf Downloadl: A Comprehensive Guide to the History of Pakistan
-
Tareekh E Pakistan In Urdu Pdf Downloadl is a book that covers the history of Pakistan from its inception to the present day. It is written in Urdu language and is available for free download online. The book is divided into four parts:
-
-
Part 1: The Pre-Partition Era: This part covers the history of the subcontinent from ancient times to the arrival of the British Raj. It also discusses the various movements and personalities that shaped the Muslim identity and nationalism in India.
-
Part 2: The Partition and Independence: This part covers the events and factors that led to the creation of Pakistan in 1947. It also narrates the challenges and achievements of the new nation in its early years.
-
Part 3: The Post-Independence Era: This part covers the history of Pakistan from 1958 to 2008. It discusses the various political, economic, social and cultural developments that took place in this period. It also analyzes the role of the military, the judiciary, the media and the civil society in Pakistan's history.
-
Part 4: The Contemporary Era: This part covers the history of Pakistan from 2008 to 2020. It highlights the major events and issues that have shaped Pakistan's current situation. It also explores the prospects and challenges for Pakistan's future.
-
-
Tareekh E Pakistan In Urdu Pdf Downloadl is a comprehensive and informative book that provides a balanced and objective account of Pakistan's history. It is suitable for students, researchers, teachers and general readers who want to learn more about Pakistan's past, present and future.
In this article, we will focus on the first part of the book, which covers the pre-partition era of Pakistan's history. This part consists of 12 chapters, each covering a different aspect of the subcontinent's history. The chapters are as follows:
-
-
Chapter 1: The Ancient Civilizations: This chapter gives an overview of the Indus Valley Civilization, the Vedic Civilization, the Mauryan Empire, the Kushan Empire, the Gupta Empire and the Harsha Empire. It also discusses the cultural and religious diversity of the ancient subcontinent.
-
Chapter 2: The Arrival of Islam: This chapter traces the advent and spread of Islam in the subcontinent. It covers the Arab conquests, the Ghaznavid Empire, the Delhi Sultanate, the Mughal Empire and the Deccan Sultanates. It also highlights the contributions of Muslim rulers, scholars, saints and poets to the subcontinent's culture and civilization.
-
Chapter 3: The British Raj: This chapter describes the rise and fall of the British rule in India. It covers the East India Company, the Sepoy Mutiny, the Crown Rule, the Indian National Congress, the Muslim League and the Partition Plan. It also analyzes the impact of British policies on the subcontinent's economy, society and politics.
-
Chapter 4: The Muslim Identity and Nationalism: This chapter explores the evolution and expression of Muslim identity and nationalism in India. It covers the Aligarh Movement, the Khilafat Movement, the Two-Nation Theory, the Lahore Resolution and the Pakistan Movement. It also examines the role of leaders like Sir Syed Ahmed Khan, Allama Iqbal, Muhammad Ali Jinnah and Liaquat Ali Khan in shaping Muslim aspirations and demands.
-
Chapter 5: The Hindu-Muslim Relations: This chapter reviews the history of Hindu-Muslim relations in India. It covers the periods of harmony and conflict, cooperation and competition, dialogue and violence. It also discusses the factors that influenced Hindu-Muslim relations such as religion, culture, politics and economics.
-
Chapter 6: The Sikh History: This chapter narrates the history of Sikhism and Sikhs in India. It covers the life and teachings of Guru Nanak, the formation and expansion of Sikhism, the Khalsa Panth, the Sikh Gurus, the Sikh Empire and the Sikh Wars. It also evaluates the role of Sikhs in Indian history and their relations with Muslims and Hindus.
-
Chapter 7: The Kashmir Issue: This chapter explains the origin and development of the Kashmir issue. It covers the history of Kashmir from ancient times to modern times, its accession to India or Pakistan in 1947, its status and autonomy under Indian constitution or Pakistani administration, its involvement in wars and conflicts between India and Pakistan, its internal political dynamics and human rights situation. It also explores possible solutions to resolve this issue peacefully.
-
Chapter 8: The Balochistan Issue: This chapter elucidates the origin and development of the Balochistan issue. It covers the history of Balochistan from ancient times to modern times, its accession to Pakistan in 1948, its status and autonomy under Pakistani constitution or administration, its involvement in insurgencies and operations by Pakistani forces, its internal political dynamics and human rights situation. It also explores possible solutions to resolve this issue peacefully.
-
Chapter 9: The Bengal Issue: This chapter clarifies the origin and development of the Bengal issue. It covers the history of Bengal from ancient times to modern times, its partition into East Bengal (later East Pakistan) and West Bengal in 1947, its status and autonomy under Pakistani constitution or administration, its involvement in the language movement and the liberation war by Bengali nationalists, and its emergence as Bangladesh in 1971. It also evaluates the role of Bengal in Pakistani history and its relations with Pakistan after independence.
-
Chapter 10: The Sindh Issue: This chapter illustrates the origin and development of the Sindh issue. It covers the history of Sindh from ancient times to modern times, its accession to Pakistan in 1947, its status and autonomy under Pakistani constitution or administration, its involvement in movements for provincial rights, cultural identity and self-determination by Sindhi nationalists, and its internal political dynamics and human rights situation. It also assesses the role of Sindh in Pakistani history.
-
-
\ No newline at end of file
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/export/caffe2_export.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/export/caffe2_export.py
deleted file mode 100644
index d609c27c7deb396352967dbcbc79b1e00f2a2de1..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/export/caffe2_export.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import copy
-import io
-import logging
-import numpy as np
-from typing import List
-import onnx
-import onnx.optimizer
-import torch
-from caffe2.proto import caffe2_pb2
-from caffe2.python import core
-from caffe2.python.onnx.backend import Caffe2Backend
-from tabulate import tabulate
-from termcolor import colored
-from torch.onnx import OperatorExportTypes
-
-from .shared import (
- ScopedWS,
- construct_init_net_from_params,
- fuse_alias_placeholder,
- fuse_copy_between_cpu_and_gpu,
- get_params_from_init_net,
- group_norm_replace_aten_with_caffe2,
- infer_device_type,
- remove_dead_end_ops,
- remove_reshape_for_fc,
- save_graph,
-)
-
-logger = logging.getLogger(__name__)
-
-
-def export_onnx_model(model, inputs):
- """
- Trace and export a model to onnx format.
-
- Args:
- model (nn.Module):
- inputs (tuple[args]): the model will be called by `model(*inputs)`
-
- Returns:
- an onnx model
- """
- assert isinstance(model, torch.nn.Module)
-
- # make sure all modules are in eval mode, onnx may change the training state
- # of the module if the states are not consistent
- def _check_eval(module):
- assert not module.training
-
- model.apply(_check_eval)
-
- # Export the model to ONNX
- with torch.no_grad():
- with io.BytesIO() as f:
- torch.onnx.export(
- model,
- inputs,
- f,
- operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
- # verbose=True, # NOTE: uncomment this for debugging
- # export_params=True,
- )
- onnx_model = onnx.load_from_string(f.getvalue())
-
- return onnx_model
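-# Note: OperatorExportTypes.ONNX_ATEN_FALLBACK (used above) exports operators that have no ONNX
-# equivalent as ATen ops rather than failing, and the Caffe2 backend used below can still consume them.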
-
-
-def _op_stats(net_def):
- type_count = {}
- for t in [op.type for op in net_def.op]:
- type_count[t] = type_count.get(t, 0) + 1
- type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
- type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
- return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
-
-
-def _assign_device_option(
- predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
-):
- """
- ONNX exported network doesn't have concept of device, assign necessary
- device option for each op in order to make it runable on GPU runtime.
- """
-
- def _get_device_type(torch_tensor):
- assert torch_tensor.device.type in ["cpu", "cuda"]
- assert torch_tensor.device.index == 0
- return torch_tensor.device.type
-
- def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
- for op, ssa_i in zip(net_proto.op, net_ssa):
- if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
- op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
- else:
- devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
- assert all(d == devices[0] for d in devices)
- if devices[0] == "cuda":
- op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
-
- # update ops in predict_net
- predict_net_input_device_types = {
- (name, 0): _get_device_type(tensor)
- for name, tensor in zip(predict_net.external_input, tensor_inputs)
- }
- predict_net_device_types = infer_device_type(
- predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
- )
- predict_net_ssa, _ = core.get_ssa(predict_net)
- _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
-
- # update ops in init_net
- init_net_ssa, versions = core.get_ssa(init_net)
- init_net_output_device_types = {
- (name, versions[name]): predict_net_device_types[(name, 0)]
- for name in init_net.external_output
- }
- init_net_device_types = infer_device_type(
- init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
- )
- _assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
-
-
-def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
- """
- Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
-
- Arg:
- model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
- tensor_inputs: a list of tensors that caffe2 model takes as input.
- """
- model = copy.deepcopy(model)
- assert isinstance(model, torch.nn.Module)
- assert hasattr(model, "encode_additional_info")
-
- # Export via ONNX
- logger.info(
- "Exporting a {} model via ONNX ...".format(type(model).__name__)
- + " Some warnings from ONNX are expected and are usually not to worry about."
- )
- onnx_model = export_onnx_model(model, (tensor_inputs,))
- # Convert ONNX model to Caffe2 protobuf
- init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
- ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
- table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
- logger.info(
- "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
- )
-
- # Apply protobuf optimization
- fuse_alias_placeholder(predict_net, init_net)
- if any(t.device.type != "cpu" for t in tensor_inputs):
- fuse_copy_between_cpu_and_gpu(predict_net)
- remove_dead_end_ops(init_net)
- _assign_device_option(predict_net, init_net, tensor_inputs)
- params, device_options = get_params_from_init_net(init_net)
- predict_net, params = remove_reshape_for_fc(predict_net, params)
- init_net = construct_init_net_from_params(params, device_options)
- group_norm_replace_aten_with_caffe2(predict_net)
-
- # Record necessary information for running the pb model in Detectron2 system.
- model.encode_additional_info(predict_net, init_net)
-
- logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
- logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
-
- return predict_net, init_net
-
-
-def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
- """
- Run the caffe2 model on given inputs, recording the shape and draw the graph.
-
- predict_net/init_net: caffe2 model.
- tensor_inputs: a list of tensors that caffe2 model takes as input.
- graph_save_path: path for saving graph of exported model.
- """
-
- logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
- save_graph(predict_net, graph_save_path, op_only=False)
-
- # Run the exported Caffe2 net
- logger.info("Running ONNX exported model ...")
- with ScopedWS("__ws_tmp__", True) as ws:
- ws.RunNetOnce(init_net)
- initialized_blobs = set(ws.Blobs())
- uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
- for name, blob in zip(uninitialized, tensor_inputs):
- ws.FeedBlob(name, blob)
-
- try:
- ws.RunNetOnce(predict_net)
- except RuntimeError as e:
- logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
-
- ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
- blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
-
- logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
- save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
-
- return ws_blobs
diff --git a/spaces/nomic-ai/vicgalle_alpaca-gpt4/README.md b/spaces/nomic-ai/vicgalle_alpaca-gpt4/README.md
deleted file mode 100644
index ba0c8fa6375d5fb7cf53366e2869363e7b5770ea..0000000000000000000000000000000000000000
--- a/spaces/nomic-ai/vicgalle_alpaca-gpt4/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: vicgalle/alpaca-gpt4
-emoji: 🗺️
-colorFrom: purple
-colorTo: red
-sdk: static
-pinned: false
----
diff --git a/spaces/oconnoob/audio-intelligence-dashboard/app/css_components/build_css.py b/spaces/oconnoob/audio-intelligence-dashboard/app/css_components/build_css.py
deleted file mode 100644
index e8da2200e3051f74714deffa4c228387fcc3d804..0000000000000000000000000000000000000000
--- a/spaces/oconnoob/audio-intelligence-dashboard/app/css_components/build_css.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Strings together css files in this folder and exports to `../styles.css`
-
-import os
-
-css_filepaths = [f for f in os.listdir() if f.endswith(".css")]
-
-css_filepaths.remove('file.css')
-css_filepaths.insert(0, 'file.css')
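-# file.css is forced to the front of the list, presumably so its base rules are emitted before the
-# other stylesheets that build on or override them.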
-
-css = ""
-for filepath in css_filepaths:
- with open(filepath, 'r') as file:
- css += file.read()
-
-with open("../styles.css", 'w') as f:
- f.write(css)
\ No newline at end of file
diff --git a/spaces/panpan06/Image2OCR/README.md b/spaces/panpan06/Image2OCR/README.md
deleted file mode 100644
index 2523ebc7dd5ab7acdaa4b3398f66ec1ef5985c39..0000000000000000000000000000000000000000
--- a/spaces/panpan06/Image2OCR/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image2OCR
-emoji: 📊
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.1.5
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/patent/demo3/app.py b/spaces/patent/demo3/app.py
deleted file mode 100644
index 1695dc2ba5e990b8f671eaae3b4826a41ff86c4c..0000000000000000000000000000000000000000
--- a/spaces/patent/demo3/app.py
+++ /dev/null
@@ -1,303 +0,0 @@
-import streamlit as st
-import time
-import requests
-
-import os
-import json
-import glob
-import re
-import random
-import difflib
-
-from random import randrange
-
-enable_summary_button = False
-
-prefix_lst = [
- "pgj_d_4096",
- "pgj_d_2048",
- "pgj_d_1024_v2",
- "pgj_d_1024_layer_14",
- "pgj_d_1024_layer_7",
- "pgj_d_1024_layer_2",
- "pgj_d_1024_layer_1" ]
-
-model_names = {
- prefix_lst[0]: 'PatentGPT-J-6B',
- prefix_lst[1]: 'PatentGPT-J-1.6B',
- prefix_lst[2]: 'PatentGPT-J-456M',
- prefix_lst[3]: 'PatentGPT-J-279M',
- prefix_lst[4]: 'PatentGPT-J-191M',
- prefix_lst[5]: 'PatentGPT-J-128M',
- prefix_lst[6]: 'PatentGPT-J-115M',}
-
-# experiment 3
-folder = os.path.join('experiments', 'non_patent')
-id_to_scroll = 1 # which of the above to scroll through
-first_claim_only = True
-
-#experiment 2
-# folder = os.path.join('experiments', 'ipg20220104_500')
-# #folder = "device_serve_results"
-# id_to_scroll = 1 # which of the above to scroll through
-# first_claim_only = False
-
-# prefix_lst = ["my_gptj_6b_tpu_size_8", "pgj_d_4096", "pgj_d_2048", "pgj_d_1024_layer_14", "pgj_d_1024_layer_7", "pgj_d_1024_layer_2", "pgj_d_1024_layer_1"]
-# #, "pgj_large", "pgj_medium", "pgj_small", ]
-# # "pgj_d_1024_layer_14"
-
-# experiment 1
-# folder = os.path.join('experiments', 'ipg22_500')
-# # (previous) folder = "eval_ipg22_500"
-# id_to_scroll = 1 # which of the above to scroll through
-# first_claim_only = True
-
-ignore_outscope = True # ignore pick > 10
-
-def handle_char_return(text):
- if text == '(none)': # unicorn text
-        text = ''
-
- return text
-
-def calc_details(base_fn):
- full_fn = os.path.join(folder, base_fn)
-    if not os.path.exists(full_fn):
-        # must return the same number of values that callers unpack (11)
-        return None, -1, -1, None, None, None, None, None, None, None, None
-
- with open(full_fn) as f:
- result = json.loads(f.read())
- print("Loaded: %s" % full_fn)
-
- lst = result['output']
- recv = result['recv']
- sum_pick = 0
- sum_prob = 0
- sum_outscope_count = 0
- sum_outscope_len = 0
- sum_hit_1 = 0
- sum_top_10_len = 0
- full_text = ''
-
- token_count = 0
- for i, tk in enumerate(lst[:-1]):
- token_text = handle_char_return(tk['actual_next_token_text'])
- next_top_seq = int(tk['actual_next_token_top_seq'])
- next_top_prob = float(tk['actual_next_token_top_prob'])
-
- full_text += token_text
- if next_top_seq == 0:
- sum_hit_1 += 1 # press "tab" for the top pick
-
- if ignore_outscope and next_top_seq>=10:
- sum_outscope_count += 1
- sum_outscope_len += len(token_text) # use length as keystrokes
- else:
- sum_pick += min(next_top_seq+1, len(token_text))
- #sum_pick += (next_top_seq+1) # press "down" & "tab"
- sum_prob += next_top_prob
- sum_top_10_len += len(token_text)
-
- token_count += 1
-
- if ignore_outscope:
- if token_count == 0: # unlikely
- avg_pick = 0
- avg_prob = 0
- else:
- avg_pick = float(sum_pick) / token_count
- avg_prob = float(sum_prob) / token_count
- else:
- avg_pick = float(sum_pick) / token_count
- avg_prob = float(sum_prob) / token_count
-
- return result, avg_pick, avg_prob, token_count, sum_pick, sum_prob, sum_outscope_count, sum_outscope_len, sum_hit_1, sum_top_10_len, full_text
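-# Keystroke model implemented above (with ignore_outscope=True): a token whose ground-truth rank is
-# outside the model's top 10 is counted as typed in full (keystrokes = its character length), while a
-# token inside the top 10 costs min(rank + 1, character length) keystrokes, i.e. selecting a
-# suggestion is never counted as more expensive than typing the token.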
-
-def show_avg(base_fn, model_name, patent_claim_num, show_pick=False):
- result, avg_pick, avg_prob, token_count, sum_pick, sum_prob, sum_outscope_count, sum_outscope_len, sum_hit_1, sum_top_10_len, full_text = calc_details(base_fn)
-
- if result is None:
- return None
-
- lst = result['output']
- result = ''
- sum_all = {}
- colors = [
- ['00ff00', '000000', '1'],
- ['008800', 'ffffff', '2-10'],
- ['ff0000', 'ffffff', 'out of top 10'],
- ]
-
- for i, tk in enumerate(lst):
- if i == len(lst)-1:
- break
-
- token_text = handle_char_return(tk['actual_next_token_text'])
- if token_text == '<|end_of_claim|>':
- break
-
- if token_text == '(none)': # for unicorn text
- break
-
- pick = int(tk['actual_next_token_top_seq'])
- prob = float(tk['actual_next_token_top_prob'])
-
- for j, item in enumerate(colors):
- sum_all[item[2]] = 0
-
- if pick == 0:
- bg_color = colors[0][0]
- fg_color = colors[0][1]
- tag = colors[0][2]
- sum_all[tag] += 1
- elif pick >= 1 and pick < 10:
- bg_color = colors[1][0]
- fg_color = colors[1][1]
- tag = colors[1][2]
- sum_all[tag] += 1
- else: # pick >= 10
- #elif pick >= 10 and pick < 100:
- bg_color = colors[2][0]
- fg_color = colors[2][1]
- tag = colors[2][2]
- sum_all[tag] += 1
-
- if show_pick:
- pick = '[%s]' % pick
- else:
- pick = ''
-
- result += "%s%s " % (bg_color, fg_color, token_text, pick)
-
- color_msg = ''
- for i, v in enumerate(colors):
- color_msg += " %s " % (v[0], v[1], v[2])
-
-
-    # sum_pick = keystrokes spent on tokens ranked in the top 10; out-of-top-10 tokens are typed in full
- keys_with_auto = (sum_pick+sum_outscope_len)
- keys_without_auto = len(full_text)
- saved_ratio = float(keys_without_auto-keys_with_auto)/keys_without_auto * 100
-
- s = 'model: %s\n' \
- 'Autocomplete Effectiveness: %.1f%% (keystrokes saved)\n' \
- 'Total keystrokes: %s (with autocomplete), %s (without autocomplete)\n' \
- 'Keystroke distribution: rank 1~10: %s (rank 1: %s), out of top 10: %s' % (model_name, saved_ratio, keys_with_auto, keys_without_auto, sum_pick, sum_hit_1, sum_outscope_len)
- st.text(s)
- st.markdown(color_msg, unsafe_allow_html=True)
- st.markdown(result, unsafe_allow_html=True)
- sum_lst = [sum_all['1'], sum_all['2-10'], sum_all['out of top 10']]
-
- return sum_lst
-
-def show_overall_summary(prefix_lst, select_lst):
- for prefix in prefix_lst:
- acc_token_count = 0
- acc_sum_pick = 0
- acc_sum_prob = 0
- acc_sum_outscope_count = 0
- acc_sum_outscope_len = 0
- acc_sum_hit_1 = 0
- acc_sum_top_10_len = 0
- acc_full_text_len = 0
-
- pre_full_text = ''
- for i, num in enumerate(select_lst):
- base_fn = '%s_%s_forward.json' % (prefix, num)
- result, avg_pick, avg_prob, token_count, sum_pick, sum_prob, sum_outscope_count, sum_outscope_len, sum_hit_1, sum_top_10_len, full_text = calc_details(base_fn)
-
- acc_token_count += token_count
- acc_sum_pick += sum_pick
- acc_sum_prob += sum_prob
- acc_sum_outscope_count += sum_outscope_count
- acc_sum_outscope_len += sum_outscope_len
- acc_sum_hit_1 += sum_hit_1
- acc_sum_top_10_len += sum_top_10_len
- acc_full_text_len += len(full_text)
-
- if acc_token_count > 0:
- # acc_sum_pick --> top 1~10
- keys_with_auto = acc_sum_pick + acc_sum_outscope_len
- keys_without_auto = acc_full_text_len
- saved_ratio = float(keys_without_auto-keys_with_auto)/keys_without_auto * 100
-
- st.text('[ %s ]\n' \
- 'Autocomplete Effectiveness: %.1f%% (ratio of saving keystroke)\n' \
- '(sum) keys_with_auto: %s, top_10_keys: %s, out_of_scope: %s, sum_hit_1: %s\n' \
- 'keys_without_auto: %s, top_10_len: %s, prob: %.2f' % (
- model_names[prefix], saved_ratio,
- '{:,}'.format(keys_with_auto),
- '{:,}'.format(acc_sum_pick),
- '{:,}'.format(acc_sum_outscope_len),
- '{:,}'.format(acc_sum_hit_1),
- '{:,}'.format(keys_without_auto),
- '{:,}'.format(acc_sum_top_10_len),
- acc_sum_prob,
- ))
-
- st.text('%s & %.1f\\%% & %s & %s & %s & %s & %s \\\\' % (model_names[prefix], saved_ratio, '{:,}'.format(keys_with_auto), '{:,}'.format(acc_sum_pick), '{:,}'.format(acc_sum_outscope_len), '{:,}'.format(acc_sum_hit_1), '{:,}'.format(keys_without_auto)))
-
- # st.text('* acc_token_count =%s --> (avg) hits: %.2f, keys: %.2f, prob: %.2f, outscope: %.2f' % (
- # acc_token_count,
- # float(acc_sum_hit_1)/acc_token_count,
- # float(acc_sum_pick)/acc_token_count,
- # float(acc_sum_prob)/acc_token_count,
- # float(acc_sum_outscope_count)/acc_token_count))
-
-def main():
- st.set_page_config( # Alternate names: setup_page, page, layout
- layout="wide", # Can be "centered" or "wide". In the future also "dashboard", etc.
- initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
- page_title="Patent-GPT-J demo", # String or None. Strings get appended with "• Streamlit".
- page_icon=None, # String, anything supported by st.image, or None.
- )
- st.subheader("PatentGPT-J Demo 3 (Autocomplete Effectiveness)")
- st.text("Data coverage: unicorn text")
-
- num_set = set()
- fn_lst = glob.glob(os.path.join(folder, '*'))
- for i, fn in enumerate(fn_lst):
- for prefix in prefix_lst:
- v = re.search('(.*?)%s\_(\d+\_\d+)\_(.*?)' % prefix, fn)
- if v is None:
- v = re.search('(.*?)%s\_(\w+\_\d+)\_(.*?)' % prefix, fn)
- if v is None:
- continue
-
- v = v.group(2)
- if first_claim_only:
- if v.endswith('_1'):
- num_set.add(v)
- else:
- num_set.add(v)
-
- num_lst = list(num_set)
- num_lst.sort()
-
- select_lst = []
- for i, num in enumerate(num_lst):
- all_existed = True
- for prefix in prefix_lst:
- fn = os.path.join(folder, '%s_%s_forward.json' % (prefix, num))
- if os.path.exists(fn) == False:
- all_existed = False
- break
- if all_existed:
- select_lst.append(num)
- select_lst.sort()
-
- show_patent_lst = [ s.replace('_', ' (claim ') + ')' for s in select_lst]
- pick = random.randrange(len(select_lst))
- num = select_lst[pick]
-
- #st.text('debug 1')
-
- avgs = []
- for prefix in prefix_lst:
- base_fn = '%s_%s_forward.json' % (prefix, num)
- one_avg = show_avg(base_fn, model_names[prefix], num)
- if one_avg is not None:
- avgs.append(one_avg)
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3/files/functions.py b/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3/files/functions.py
deleted file mode 100644
index 000da827f1620086af4619cdd5bead042c5e91be..0000000000000000000000000000000000000000
--- a/spaces/pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3/files/functions.py
+++ /dev/null
@@ -1,967 +0,0 @@
-import os
-
-# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
-# os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')
-os.system('pip install -q torch==1.10.0+cu111 torchvision==0.11+cu111 -f https://download.pytorch.org/whl/torch_stable.html')
-
-# install detectron2 that matches pytorch 1.8
-# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
-#os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
-os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
-
-import detectron2
-from detectron2.utils.logger import setup_logger
-setup_logger()
-
-import gradio as gr
-import re
-import string
-import torch
-
-from operator import itemgetter
-import collections
-
-import pypdf
-from pypdf import PdfReader
-from pypdf.errors import PdfReadError
-
-import pypdfium2 as pdfium
-import langdetect
-from langdetect import detect_langs
-
-import pandas as pd
-import numpy as np
-import random
-import tempfile
-import itertools
-
-from matplotlib import font_manager
-from PIL import Image, ImageDraw, ImageFont
-import cv2
-
-import pathlib
-from pathlib import Path
-import shutil
-
-from functools import partial
-
-# Tesseract
-print(os.popen(f'cat /etc/debian_version').read())
-print(os.popen(f'cat /etc/issue').read())
-print(os.popen(f'apt search tesseract').read())
-import pytesseract
-
-## Key parameters
-
-# categories colors
-label2color = {
- 'Caption': 'brown',
- 'Footnote': 'orange',
- 'Formula': 'gray',
- 'List-item': 'yellow',
- 'Page-footer': 'red',
- 'Page-header': 'red',
- 'Picture': 'violet',
- 'Section-header': 'orange',
- 'Table': 'green',
- 'Text': 'blue',
- 'Title': 'pink'
- }
-
-# bounding boxes start and end of a sequence
-cls_box = [0, 0, 0, 0]
-cls_box1, cls_box2 = cls_box, cls_box
-
-sep_box_lilt = cls_box
-sep_box1 = sep_box_lilt
-
-sep_box_layoutxlm = [1000, 1000, 1000, 1000]
-sep_box2 = sep_box_layoutxlm
-
-# models
-model_id_lilt = "pierreguillou/lilt-xlm-roberta-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512"
-model_id1 = model_id_lilt
-model_id_layoutxlm = "pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512"
-model_id2 = model_id_layoutxlm
-
-# tokenizer for LayoutXLM
-tokenizer_id_layoutxlm = "xlm-roberta-base"
-
-# (tokenization) The maximum length of a feature (sequence)
-if (str(384) in model_id_lilt) and (str(384) in model_id_layoutxlm):
- max_length = 384
-elif (str(512) in model_id_lilt) and (str(512) in model_id_layoutxlm):
- max_length = 512
-else:
- print("Error with max_length of chunks!")
-
-# (tokenization) overlap
-doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed.
-
-# max PDF page images that will be displayed
-max_imgboxes = 2
-
-# get files
-examples_dir = 'files/'
-Path(examples_dir).mkdir(parents=True, exist_ok=True)
-from huggingface_hub import hf_hub_download
-files = ["example.pdf", "blank.pdf", "blank.png", "languages_iso.csv", "languages_tesseract.csv", "wo_content.png"]
-for file_name in files:
- path_to_file = hf_hub_download(
- repo_id = "pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3",
- filename = "files/" + file_name,
- repo_type = "space"
- )
- shutil.copy(path_to_file,examples_dir)
-
-# path to files
-image_wo_content = examples_dir + "wo_content.png" # image without content
-pdf_blank = examples_dir + "blank.pdf" # blank PDF
-image_blank = examples_dir + "blank.png" # blank image
-
-## get langdetect2Tesseract dictionary
-t = "files/languages_tesseract.csv"
-l = "files/languages_iso.csv"
-
-df_t = pd.read_csv(t)
-df_l = pd.read_csv(l)
-
-langs_t = df_t["Language"].to_list()
-langs_t = [lang_t.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_t in langs_t]
-langs_l = df_l["Language"].to_list()
-langs_l = [lang_l.lower().strip().translate(str.maketrans('', '', string.punctuation)) for lang_l in langs_l]
-langscode_t = df_t["LangCode"].to_list()
-langscode_l = df_l["LangCode"].to_list()
-
-Tesseract2langdetect, langdetect2Tesseract = dict(), dict()
-for lang_t, langcode_t in zip(langs_t,langscode_t):
- try:
- if lang_t == "Chinese - Simplified".lower().strip().translate(str.maketrans('', '', string.punctuation)): lang_t = "chinese"
- index = langs_l.index(lang_t)
- langcode_l = langscode_l[index]
- Tesseract2langdetect[langcode_t] = langcode_l
- except:
- continue
-
-langdetect2Tesseract = {v:k for k,v in Tesseract2langdetect.items()}
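-
-# Illustrative sketch (not part of the original flow): turning langdetect results into the
-# "-l" language string expected by Tesseract, mirroring what extraction_data_from_image()
-# does further below.
-def _demo_tesseract_lang_string(text):
-    try:
-        detected = detect_langs(text)  # e.g. [fr:0.999996]
-        codes = [langdetect2Tesseract[d.lang] for d in detected if d.lang in langdetect2Tesseract]
-        langs_string = '+'.join(codes) if codes else "eng"
-    except Exception:
-        langs_string = "eng"
-    return langs_string + '+osd'  # e.g. "fra+osd"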
-
-
-## model / feature extractor / tokenizer
-
-# get device
-import torch
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-## model LiLT
-import transformers
-from transformers import AutoTokenizer, AutoModelForTokenClassification
-tokenizer_lilt = AutoTokenizer.from_pretrained(model_id_lilt)
-model_lilt = AutoModelForTokenClassification.from_pretrained(model_id_lilt);
-model_lilt.to(device);
-
-tokenizer1 = tokenizer_lilt
-model1 = model_lilt
-
-## model LayoutXLM
-from transformers import LayoutLMv2ForTokenClassification # LayoutXLMTokenizerFast,
-model_layoutxlm = LayoutLMv2ForTokenClassification.from_pretrained(model_id_layoutxlm);
-model_layoutxlm.to(device);
-
-# feature extractor
-from transformers import LayoutLMv2FeatureExtractor
-feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
-
-# tokenizer
-from transformers import AutoTokenizer
-tokenizer_layoutxlm = AutoTokenizer.from_pretrained(tokenizer_id_layoutxlm)
-
-tokenizer2 = tokenizer_layoutxlm
-model2 = model_layoutxlm
-
-
-# General
-
-# get text and bounding boxes from an image
-# https://stackoverflow.com/questions/61347755/how-can-i-get-line-coordinates-that-readed-by-tesseract
-# https://medium.com/geekculture/tesseract-ocr-understanding-the-contents-of-documents-beyond-their-text-a98704b7c655
-def get_data_paragraph(results, factor, conf_min=0):
-
- data = {}
- for i in range(len(results['line_num'])):
- level = results['level'][i]
- block_num = results['block_num'][i]
- par_num = results['par_num'][i]
- line_num = results['line_num'][i]
- top, left = results['top'][i], results['left'][i]
- width, height = results['width'][i], results['height'][i]
- conf = results['conf'][i]
- text = results['text'][i]
- if not (text == '' or text.isspace()):
- if conf >= conf_min:
- tup = (text, left, top, width, height)
- if block_num in list(data.keys()):
- if par_num in list(data[block_num].keys()):
- if line_num in list(data[block_num][par_num].keys()):
- data[block_num][par_num][line_num].append(tup)
- else:
- data[block_num][par_num][line_num] = [tup]
- else:
- data[block_num][par_num] = {}
- data[block_num][par_num][line_num] = [tup]
- else:
- data[block_num] = {}
- data[block_num][par_num] = {}
- data[block_num][par_num][line_num] = [tup]
-
-    # get paragraphs dictionary with list of lines
- par_data = {}
- par_idx = 1
- for _, b in data.items():
- for _, p in b.items():
- line_data = {}
- line_idx = 1
- for _, l in p.items():
- line_data[line_idx] = l
- line_idx += 1
- par_data[par_idx] = line_data
- par_idx += 1
-
- # get lines of texts, grouped by paragraph
- texts_pars = list()
- row_indexes = list()
- texts_lines = list()
- texts_lines_par = list()
- row_index = 0
- for _,par in par_data.items():
- count_lines = 0
- lines_par = list()
- for _,line in par.items():
- if count_lines == 0: row_indexes.append(row_index)
- line_text = ' '.join([item[0] for item in line])
- texts_lines.append(line_text)
- lines_par.append(line_text)
- count_lines += 1
- row_index += 1
- # lines.append("\n")
- row_index += 1
- texts_lines_par.append(lines_par)
- texts_pars.append(' '.join(lines_par))
- # lines = lines[:-1]
-
-    # get paragraph boxes (par_boxes)
-    # get line boxes (line_boxes)
- par_boxes = list()
- par_idx = 1
- line_boxes, lines_par_boxes = list(), list()
- line_idx = 1
- for _, par in par_data.items():
- xmins, ymins, xmaxs, ymaxs = list(), list(), list(), list()
- line_boxes_par = list()
- count_line_par = 0
- for _, line in par.items():
- xmin, ymin = line[0][1], line[0][2]
- xmax, ymax = (line[-1][1] + line[-1][3]), (line[-1][2] + line[-1][4])
- line_boxes.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)])
- line_boxes_par.append([int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)])
- xmins.append(xmin)
- ymins.append(ymin)
- xmaxs.append(xmax)
- ymaxs.append(ymax)
- line_idx += 1
- count_line_par += 1
- xmin, ymin, xmax, ymax = min(xmins), min(ymins), max(xmaxs), max(ymaxs)
- par_bbox = [int(xmin/factor), int(ymin/factor), int(xmax/factor), int(ymax/factor)]
- par_boxes.append(par_bbox)
- lines_par_boxes.append(line_boxes_par)
- par_idx += 1
-
- return texts_lines, texts_pars, texts_lines_par, row_indexes, par_boxes, line_boxes, lines_par_boxes
-
-# rescale image to get 300dpi
-def set_image_dpi_resize(image):
- """
- Rescaling image to 300dpi while resizing
- :param image: An image
- :return: A rescaled image
- """
- length_x, width_y = image.size
- factor = min(1, float(1024.0 / length_x))
- size = int(factor * length_x), int(factor * width_y)
- # image_resize = image.resize(size, Image.Resampling.LANCZOS)
- image_resize = image.resize(size, Image.LANCZOS)
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='1.png')
- temp_filename = temp_file.name
- image_resize.save(temp_filename, dpi=(300, 300))
- return factor, temp_filename
-
-# each bounding box must be in (upper left, lower right) format.
-# source: https://github.com/NielsRogge/Transformers-Tutorials/issues/129
-def upperleft_to_lowerright(bbox):
- x0, y0, x1, y1 = tuple(bbox)
- if bbox[2] < bbox[0]:
- x0 = bbox[2]
- x1 = bbox[0]
- if bbox[3] < bbox[1]:
- y0 = bbox[3]
- y1 = bbox[1]
- return [x0, y0, x1, y1]
-
-# convert bounding boxes from (left, top, width, height) format to (left, top, left+width, top+height) format.
-def convert_box(bbox):
- x, y, w, h = tuple(bbox) # the row comes in (left, top, width, height) format
-  return [x, y, x+w, y+h] # we turn it into (left, top, left+width, top+height) to get the actual box
-
-# the models expect bounding box coordinates normalized to a 0-1000 scale
-def normalize_box(bbox, width, height):
- return [
- int(1000 * (bbox[0] / width)),
- int(1000 * (bbox[1] / height)),
- int(1000 * (bbox[2] / width)),
- int(1000 * (bbox[3] / height)),
- ]
-
-# map normalized (0-1000) box coordinates back to pixel coordinates
-def denormalize_box(bbox, width, height):
- return [
- int(width * (bbox[0] / 1000)),
- int(height * (bbox[1] / 1000)),
- int(width* (bbox[2] / 1000)),
- int(height * (bbox[3] / 1000)),
- ]
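-
-# Worked example (sketch): with an image of width=1000 px and height=2000 px,
-# normalize_box([50, 100, 200, 300], 1000, 2000) -> [50, 50, 200, 150]
-# and denormalize_box([50, 50, 200, 150], 1000, 2000) -> [50, 100, 200, 300] (round trip).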
-
-# get back original size
-def original_box(box, original_width, original_height, coco_width, coco_height):
- return [
- int(original_width * (box[0] / coco_width)),
- int(original_height * (box[1] / coco_height)),
- int(original_width * (box[2] / coco_width)),
- int(original_height* (box[3] / coco_height)),
- ]
-
-def get_blocks(bboxes_block, categories, texts):
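-    # note: id2label is assumed to be available globally (e.g. from model.config.id2label);
-    # it is not defined in this excerpt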
-
- # get list of unique block boxes
- bbox_block_dict, bboxes_block_list, bbox_block_prec = dict(), list(), list()
- for count_block, bbox_block in enumerate(bboxes_block):
- if bbox_block != bbox_block_prec:
- bbox_block_indexes = [i for i, bbox in enumerate(bboxes_block) if bbox == bbox_block]
- bbox_block_dict[count_block] = bbox_block_indexes
- bboxes_block_list.append(bbox_block)
- bbox_block_prec = bbox_block
-
- # get list of categories and texts by unique block boxes
- category_block_list, text_block_list = list(), list()
- for bbox_block in bboxes_block_list:
- count_block = bboxes_block.index(bbox_block)
- bbox_block_indexes = bbox_block_dict[count_block]
- category_block = np.array(categories, dtype=object)[bbox_block_indexes].tolist()[0]
- category_block_list.append(category_block)
- text_block = np.array(texts, dtype=object)[bbox_block_indexes].tolist()
- text_block = [text.replace("\n","").strip() for text in text_block]
- if id2label[category_block] == "Text" or id2label[category_block] == "Caption" or id2label[category_block] == "Footnote":
- text_block = ' '.join(text_block)
- else:
- text_block = '\n'.join(text_block)
- text_block_list.append(text_block)
-
- return bboxes_block_list, category_block_list, text_block_list
-
-# function to sort bounding boxes
-def get_sorted_boxes(bboxes):
-
- # sort by y from page top to bottom
- sorted_bboxes = sorted(bboxes, key=itemgetter(1), reverse=False)
- y_list = [bbox[1] for bbox in sorted_bboxes]
-
- # sort by x from page left to right when boxes with same y
- if len(list(set(y_list))) != len(y_list):
- y_list_duplicates_indexes = dict()
- y_list_duplicates = [item for item, count in collections.Counter(y_list).items() if count > 1]
- for item in y_list_duplicates:
- y_list_duplicates_indexes[item] = [i for i, e in enumerate(y_list) if e == item]
- bbox_list_y_duplicates = sorted(np.array(sorted_bboxes, dtype=object)[y_list_duplicates_indexes[item]].tolist(), key=itemgetter(0), reverse=False)
- np_array_bboxes = np.array(sorted_bboxes)
- np_array_bboxes[y_list_duplicates_indexes[item]] = np.array(bbox_list_y_duplicates)
- sorted_bboxes = np_array_bboxes.tolist()
-
- return sorted_bboxes
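-
-# Worked example (sketch): boxes sharing the same y are re-ordered from left to right:
-# get_sorted_boxes([[300, 50, 400, 80], [10, 50, 100, 80], [5, 20, 60, 40]])
-# -> [[5, 20, 60, 40], [10, 50, 100, 80], [300, 50, 400, 80]]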
-
-# sort data by y (top to bottom), then by x (left to right) for boxes sharing the same y
-def sort_data(bboxes, categories, texts):
-
- sorted_bboxes = get_sorted_boxes(bboxes)
- sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes]
- sorted_categories = np.array(categories, dtype=object)[sorted_bboxes_indexes].tolist()
- sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist()
-
- return sorted_bboxes, sorted_categories, sorted_texts
-
-# sort data by y (top to bottom), then by x (left to right) for boxes sharing the same y
-def sort_data_wo_labels(bboxes, texts):
-
- sorted_bboxes = get_sorted_boxes(bboxes)
- sorted_bboxes_indexes = [bboxes.index(bbox) for bbox in sorted_bboxes]
- sorted_texts = np.array(texts, dtype=object)[sorted_bboxes_indexes].tolist()
-
- return sorted_bboxes, sorted_texts
-
-
-# PDF processing
-
-# get filename and images of PDF pages
-def pdf_to_images(uploaded_pdf):
-
- # Check if None object
- if uploaded_pdf is None:
- path_to_file = pdf_blank
- filename = path_to_file.replace(examples_dir,"")
- msg = "Invalid PDF file."
- images = [Image.open(image_blank)]
- else:
- # path to the uploaded PDF
- path_to_file = uploaded_pdf.name
- filename = path_to_file# .replace("/tmp/","")
-
- try:
- PdfReader(path_to_file)
- except PdfReadError:
- path_to_file = pdf_blank
- filename = path_to_file.replace(examples_dir,"")
- msg = "Invalid PDF file."
- images = [Image.open(image_blank)]
- else:
- try:
- # images = convert_from_path(path_to_file, last_page=max_imgboxes)
-
- pdf = pdfium.PdfDocument(str(filename))
- version = pdf.get_version() # get the PDF standard version
- n_pages = len(pdf) # get the number of pages in the document
-                last_page = max_imgboxes
-                page_indices = [i for i in range(min(last_page, n_pages))] # pages until last_page (never more than the document has)
- images = list(pdf.render(
- pdfium.PdfBitmap.to_pil,
- page_indices = page_indices,
- scale = 300/72, # 300dpi resolution
- ))
-
- num_imgs = len(images)
- msg = f'The PDF "{filename}" was converted into {num_imgs} images.'
- except:
- msg = f'Error with the PDF "{filename}": it was not converted into images.'
- images = [Image.open(image_wo_content)]
-
- return filename, msg, images
-
-# Extraction of image data (text and bounding boxes)
-def extraction_data_from_image(images):
-
- num_imgs = len(images)
-
- if num_imgs > 0:
-
- # https://pyimagesearch.com/2021/11/15/tesseract-page-segmentation-modes-psms-explained-how-to-improve-your-ocr-accuracy/
- custom_config = r'--oem 3 --psm 3 -l eng' # default config PyTesseract: --oem 3 --psm 3 -l eng+deu+fra+jpn+por+spa+rus+hin+chi_sim
- results, texts_lines, texts_pars, texts_lines_par, row_indexes, par_boxes, line_boxes, lines_par_boxes, images_pixels = dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict()
- images_ids_list, texts_lines_list, texts_pars_list, texts_lines_par_list, par_boxes_list, line_boxes_list, lines_par_boxes_list, images_list, images_pixels_list, page_no_list, num_pages_list = list(), list(), list(), list(), list(), list(), list(), list(), list(), list(), list()
-
- try:
- for i,image in enumerate(images):
- # image preprocessing
- # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
- img = image.copy()
- factor, path_to_img = set_image_dpi_resize(img) # Rescaling to 300dpi
- img = Image.open(path_to_img)
- img = np.array(img, dtype='uint8') # convert PIL to cv2
- img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # gray scale image
- ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
-
- # OCR PyTesseract | get langs of page
- txt = pytesseract.image_to_string(img, config=custom_config)
- txt = txt.strip().lower()
- txt = re.sub(r" +", " ", txt) # multiple space
- txt = re.sub(r"(\n\s*)+\n+", "\n", txt) # multiple line
- # txt = os.popen(f'tesseract {img_filepath} - {custom_config}').read()
- try:
- langs = detect_langs(txt)
- langs = [langdetect2Tesseract[langs[i].lang] for i in range(len(langs))]
- langs_string = '+'.join(langs)
- except:
- langs_string = "eng"
- langs_string += '+osd'
- custom_config = f'--oem 3 --psm 3 -l {langs_string}' # default config PyTesseract: --oem 3 --psm 3
-
- # OCR PyTesseract | get data
- results[i] = pytesseract.image_to_data(img, config=custom_config, output_type=pytesseract.Output.DICT)
- # results[i] = os.popen(f'tesseract {img_filepath} - {custom_config}').read()
-
- # get image pixels
- images_pixels[i] = feature_extractor(images[i], return_tensors="pt").pixel_values
-
- texts_lines[i], texts_pars[i], texts_lines_par[i], row_indexes[i], par_boxes[i], line_boxes[i], lines_par_boxes[i] = get_data_paragraph(results[i], factor, conf_min=0)
- texts_lines_list.append(texts_lines[i])
- texts_pars_list.append(texts_pars[i])
- texts_lines_par_list.append(texts_lines_par[i])
- par_boxes_list.append(par_boxes[i])
- line_boxes_list.append(line_boxes[i])
- lines_par_boxes_list.append(lines_par_boxes[i])
- images_ids_list.append(i)
- images_pixels_list.append(images_pixels[i])
- images_list.append(images[i])
- page_no_list.append(i)
- num_pages_list.append(num_imgs)
-
- except:
-            print("There was an error during OCR extraction of the PDF text!")
- else:
- from datasets import Dataset
- dataset = Dataset.from_dict({"images_ids": images_ids_list, "images": images_list, "images_pixels": images_pixels_list, "page_no": page_no_list, "num_pages": num_pages_list, "texts_line": texts_lines_list, "texts_par": texts_pars_list, "texts_lines_par": texts_lines_par_list, "bboxes_par": par_boxes_list, "bboxes_lines_par":lines_par_boxes_list})
-
- # print(f"The text data was successfully extracted by the OCR!")
-
- return dataset, texts_lines, texts_pars, texts_lines_par, row_indexes, par_boxes, line_boxes, lines_par_boxes
-
-def prepare_inference_features_paragraph(example, tokenizer, max_length, cls_box, sep_box):
-
- images_ids_list, chunks_ids_list, input_ids_list, attention_mask_list, bb_list, images_pixels_list = list(), list(), list(), list(), list(), list()
-
- # get batch
- # batch_page_hash = example["page_hash"]
- batch_images_ids = example["images_ids"]
- batch_images = example["images"]
- batch_images_pixels = example["images_pixels"]
- batch_bboxes_par = example["bboxes_par"]
- batch_texts_par = example["texts_par"]
- batch_images_size = [image.size for image in batch_images]
-
- batch_width, batch_height = [image_size[0] for image_size in batch_images_size], [image_size[1] for image_size in batch_images_size]
-
- # add a dimension if not a batch but only one image
- if not isinstance(batch_images_ids, list):
- batch_images_ids = [batch_images_ids]
- batch_images = [batch_images]
- batch_images_pixels = [batch_images_pixels]
- batch_bboxes_par = [batch_bboxes_par]
- batch_texts_par = [batch_texts_par]
- batch_width, batch_height = [batch_width], [batch_height]
-
- # process all images of the batch
- for num_batch, (image_id, image_pixels, boxes, texts_par, width, height) in enumerate(zip(batch_images_ids, batch_images_pixels, batch_bboxes_par, batch_texts_par, batch_width, batch_height)):
- tokens_list = []
- bboxes_list = []
-
-        # add a dimension if there is only one image
- if not isinstance(texts_par, list):
- texts_par, boxes = [texts_par], [boxes]
-
-        # normalize boxes (after fixing corner order) to the model's 0-1000 scale
- normalize_bboxes_par = [normalize_box(upperleft_to_lowerright(box), width, height) for box in boxes]
-
- # sort boxes with texts
- # we want sorted lists from top to bottom of the image
- boxes, texts_par = sort_data_wo_labels(normalize_bboxes_par, texts_par)
-
- count = 0
- for box, text_par in zip(boxes, texts_par):
- tokens_par = tokenizer.tokenize(text_par)
- num_tokens_par = len(tokens_par) # get number of tokens
- tokens_list.extend(tokens_par)
- bboxes_list.extend([box] * num_tokens_par) # number of boxes must be the same as the number of tokens
-
- # use of return_overflowing_tokens=True / stride=doc_stride
- # to get parts of image with overlap
- # source: https://huggingface.co/course/chapter6/3b?fw=tf#handling-long-contexts
- encodings = tokenizer(" ".join(texts_par),
- truncation=True,
- padding="max_length",
- max_length=max_length,
- stride=doc_stride,
- return_overflowing_tokens=True,
- return_offsets_mapping=True
- )
-
- otsm = encodings.pop("overflow_to_sample_mapping")
- offset_mapping = encodings.pop("offset_mapping")
-
-        # build the inputs of each chunk and get their boxes
- sequence_length_prev = 0
- for i, offsets in enumerate(offset_mapping):
-            # truncate tokens and boxes based on the chunk length minus 2 (the two special tokens)
- sequence_length = len(encodings.input_ids[i]) - 2
- if i == 0: start = 0
- else: start += sequence_length_prev - doc_stride
- end = start + sequence_length
- sequence_length_prev = sequence_length
-
- # get tokens, boxes and labels of this image chunk
- bb = [cls_box] + bboxes_list[start:end] + [sep_box]
-
-            # as the last chunk can have a length < max_length,
-            # we must pad the boxes with [sep_box] (the tokens are already padded by the tokenizer)
- if len(bb) < max_length:
- bb = bb + [sep_box] * (max_length - len(bb))
-
- # append results
- input_ids_list.append(encodings["input_ids"][i])
- attention_mask_list.append(encodings["attention_mask"][i])
- bb_list.append(bb)
- images_ids_list.append(image_id)
- chunks_ids_list.append(i)
- images_pixels_list.append(image_pixels)
-
- return {
- "images_ids": images_ids_list,
- "chunk_ids": chunks_ids_list,
- "input_ids": input_ids_list,
- "attention_mask": attention_mask_list,
- "normalized_bboxes": bb_list,
- "images_pixels": images_pixels_list
- }
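-
-# Sketch of how prepare_inference_features_paragraph is typically applied (its actual call
-# sites are outside this excerpt): map it over the OCR dataset in batched mode and drop the
-# original columns so that only the encoded features remain.
-def _demo_encode_dataset(dataset, tokenizer, cls_box, sep_box):
-    encoded = dataset.map(
-        partial(prepare_inference_features_paragraph,
-                tokenizer=tokenizer, max_length=max_length,
-                cls_box=cls_box, sep_box=sep_box),
-        batched=True,
-        batch_size=1,
-        remove_columns=dataset.column_names,
-    )
-    return encoded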
-
-from torch.utils.data import Dataset
-
-class CustomDataset(Dataset):
- def __init__(self, dataset, tokenizer):
- self.dataset = dataset
- self.tokenizer = tokenizer
-
- def __len__(self):
- return len(self.dataset)
-
- def __getitem__(self, idx):
- # get item
- example = self.dataset[idx]
- encoding = dict()
- encoding["images_ids"] = example["images_ids"]
- encoding["chunk_ids"] = example["chunk_ids"]
- encoding["input_ids"] = example["input_ids"]
- encoding["attention_mask"] = example["attention_mask"]
- encoding["bbox"] = example["normalized_bboxes"]
- encoding["images_pixels"] = example["images_pixels"]
-
- return encoding
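-
-# Sketch (assumed usage, not shown in this excerpt): the CustomDataset is indexed directly
-# by predictions_token_level(); each item is a plain dict of encoded features.
-def _demo_custom_dataset(encoded_dataset, tokenizer):
-    custom = CustomDataset(encoded_dataset, tokenizer)
-    first = custom[0]
-    # ['images_ids', 'chunk_ids', 'input_ids', 'attention_mask', 'bbox', 'images_pixels']
-    return list(first.keys())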
-
-import torch.nn.functional as F
-
-# get predictions at token level
-def predictions_token_level(images, custom_encoded_dataset, model_id, model):
-
- num_imgs = len(images)
- if num_imgs > 0:
-
- chunk_ids, input_ids, bboxes, pixels_values, outputs, token_predictions = dict(), dict(), dict(), dict(), dict(), dict()
- images_ids_list = list()
-
- for i,encoding in enumerate(custom_encoded_dataset):
-
- # get custom encoded data
- image_id = encoding['images_ids']
- chunk_id = encoding['chunk_ids']
- input_id = torch.tensor(encoding['input_ids'])[None]
- attention_mask = torch.tensor(encoding['attention_mask'])[None]
- bbox = torch.tensor(encoding['bbox'])[None]
- pixel_values = torch.tensor(encoding["images_pixels"])
-
-            # save data in dictionaries
- if image_id not in images_ids_list: images_ids_list.append(image_id)
-
- if image_id in chunk_ids: chunk_ids[image_id].append(chunk_id)
- else: chunk_ids[image_id] = [chunk_id]
-
- if image_id in input_ids: input_ids[image_id].append(input_id)
- else: input_ids[image_id] = [input_id]
-
- if image_id in bboxes: bboxes[image_id].append(bbox)
- else: bboxes[image_id] = [bbox]
-
- if image_id in pixels_values: pixels_values[image_id].append(pixel_values)
- else: pixels_values[image_id] = [pixel_values]
-
- # get prediction with forward pass
- with torch.no_grad():
-
- if model_id == model_id_lilt:
- output = model(
- input_ids=input_id.to(device),
- attention_mask=attention_mask.to(device),
- bbox=bbox.to(device),
- )
- elif model_id == model_id_layoutxlm:
- output = model(
- input_ids=input_id.to(device),
- attention_mask=attention_mask.to(device),
- bbox=bbox.to(device),
- image=pixel_values.to(device)
- )
-
-                # save prediction probabilities in a dictionary
- if image_id in outputs: outputs[image_id].append(F.softmax(output.logits.squeeze(), dim=-1))
- else: outputs[image_id] = [F.softmax(output.logits.squeeze(), dim=-1)]
-
- return outputs, images_ids_list, chunk_ids, input_ids, bboxes
-
- else:
- print("An error occurred while getting predictions!")
-
-from functools import reduce
-
-# Get predictions (paragraph level)
-def predictions_probs_paragraph_level(max_length, tokenizer, id2label, dataset, outputs, images_ids_list, chunk_ids, input_ids, bboxes, cls_box, sep_box):
-
- ten_probs_dict, ten_input_ids_dict, ten_bboxes_dict = dict(), dict(), dict()
- bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = dict(), dict(), dict(), dict()
-
- if len(images_ids_list) > 0:
-
- for i, image_id in enumerate(images_ids_list):
-
- # get image information
- images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"]
- image = images_list[0]
- width, height = image.size
-
- # get data
- chunk_ids_list = chunk_ids[image_id]
- outputs_list = outputs[image_id]
- input_ids_list = input_ids[image_id]
- bboxes_list = bboxes[image_id]
-
- # create zeros tensors
- ten_probs = torch.zeros((outputs_list[0].shape[0] - 2)*len(outputs_list), outputs_list[0].shape[1])
- ten_input_ids = torch.ones(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list)), dtype =int)
- ten_bboxes = torch.zeros(size=(1, (outputs_list[0].shape[0] - 2)*len(outputs_list), 4), dtype =int)
-
- if len(outputs_list) > 1:
-
- for num_output, (output, input_id, bbox) in enumerate(zip(outputs_list, input_ids_list, bboxes_list)):
- start = num_output*(max_length - 2) - max(0,num_output)*doc_stride
- end = start + (max_length - 2)
-
- if num_output == 0:
- ten_probs[start:end,:] += output[1:-1]
- ten_input_ids[:,start:end] = input_id[:,1:-1]
- ten_bboxes[:,start:end,:] = bbox[:,1:-1,:]
- else:
- ten_probs[start:start + doc_stride,:] += output[1:1 + doc_stride]
- ten_probs[start:start + doc_stride,:] = ten_probs[start:start + doc_stride,:] * 0.5
- ten_probs[start + doc_stride:end,:] += output[1 + doc_stride:-1]
-
- ten_input_ids[:,start:start + doc_stride] = input_id[:,1:1 + doc_stride]
- ten_input_ids[:,start + doc_stride:end] = input_id[:,1 + doc_stride:-1]
-
- ten_bboxes[:,start:start + doc_stride,:] = bbox[:,1:1 + doc_stride,:]
- ten_bboxes[:,start + doc_stride:end,:] = bbox[:,1 + doc_stride:-1,:]
-
- else:
- ten_probs += outputs_list[0][1:-1]
- ten_input_ids = input_ids_list[0][:,1:-1]
- ten_bboxes = bboxes_list[0][:,1:-1]
-
- ten_probs_list, ten_input_ids_list, ten_bboxes_list = ten_probs.tolist(), ten_input_ids.tolist()[0], ten_bboxes.tolist()[0]
- bboxes_list = list()
- input_ids_dict, probs_dict = dict(), dict()
- bbox_prev = [-100, -100, -100, -100]
- for probs, input_id, bbox in zip(ten_probs_list, ten_input_ids_list, ten_bboxes_list):
- bbox = denormalize_box(bbox, width, height)
- if bbox != bbox_prev and bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]:
- bboxes_list.append(bbox)
- input_ids_dict[str(bbox)] = [input_id]
- probs_dict[str(bbox)] = [probs]
- elif bbox != cls_box and bbox != sep_box and bbox[0] != bbox[2] and bbox[1] != bbox[3]:
- input_ids_dict[str(bbox)].append(input_id)
- probs_dict[str(bbox)].append(probs)
- bbox_prev = bbox
-
- probs_bbox = dict()
- for i,bbox in enumerate(bboxes_list):
- probs = probs_dict[str(bbox)]
- probs = np.array(probs).T.tolist()
-
- probs_label = list()
- for probs_list in probs:
- prob_label = reduce(lambda x, y: x*y, probs_list)
-                    prob_label = prob_label**(1./(len(probs_list))) # geometric mean of the token probabilities for this label
- probs_label.append(prob_label)
- # max_value = max(probs_label)
- # max_index = probs_label.index(max_value)
- # probs_bbox[str(bbox)] = max_index
- probs_bbox[str(bbox)] = probs_label
-
- bboxes_list_dict[image_id] = bboxes_list
- input_ids_dict_dict[image_id] = input_ids_dict
- probs_dict_dict[image_id] = probs_bbox
-
- # df[image_id] = pd.DataFrame()
- # df[image_id]["bboxes"] = bboxes_list
- # df[image_id]["texts"] = [tokenizer.decode(input_ids_dict[str(bbox)]) for bbox in bboxes_list]
- # df[image_id]["labels"] = [id2label[probs_bbox[str(bbox)]] for bbox in bboxes_list]
-
- return probs_bbox, bboxes_list_dict, input_ids_dict_dict, probs_dict_dict #, df
-
- else:
- print("An error occurred while getting predictions!")
-
-from functools import reduce
-
-# Get predictions (paragraph level)
-def predictions_paragraph_level(max_length, tokenizer1, id2label, dataset, outputs1, images_ids_list1, chunk_ids1, input_ids1, bboxes1, cls_box1, sep_box1, tokenizer2, outputs2, images_ids_list2, chunk_ids2, input_ids2, bboxes2, cls_box2, sep_box2):
-
- bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = dict(), dict(), dict(), dict()
-
- probs_bbox1, bboxes_list_dict1, input_ids_dict_dict1, probs_dict_dict1 = predictions_probs_paragraph_level(max_length, tokenizer1, id2label, dataset, outputs1, images_ids_list1, chunk_ids1, input_ids1, bboxes1, cls_box1, sep_box1)
- probs_bbox2, bboxes_list_dict2, input_ids_dict_dict2, probs_dict_dict2 = predictions_probs_paragraph_level(max_length, tokenizer2, id2label, dataset, outputs2, images_ids_list2, chunk_ids2, input_ids2, bboxes2, cls_box2, sep_box2)
-
- if len(images_ids_list1) > 0:
-
- for i, image_id in enumerate(images_ids_list1):
-
- bboxes_list1 = bboxes_list_dict1[image_id]
- input_ids_dict1 = input_ids_dict_dict1[image_id]
- probs_bbox1 = probs_dict_dict1[image_id]
-
- bboxes_list2 = bboxes_list_dict2[image_id]
- input_ids_dict2 = input_ids_dict_dict2[image_id]
- probs_bbox2 = probs_dict_dict2[image_id]
-
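-            # ensemble the two models: average the per-label probabilities from LiLT and
-            # LayoutXLM for each box, then keep the index (label id) of the highest average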
- probs_bbox = dict()
- for bbox in bboxes_list1:
- prob_bbox = [(p1+p2)/2 for p1,p2 in zip(probs_bbox1[str(bbox)], probs_bbox2[str(bbox)])]
- max_value = max(prob_bbox)
- max_index = prob_bbox.index(max_value)
- probs_bbox[str(bbox)] = max_index
-
- bboxes_list_dict[image_id] = bboxes_list1
- input_ids_dict_dict[image_id] = input_ids_dict1
- probs_dict_dict[image_id] = probs_bbox
-
- df[image_id] = pd.DataFrame()
- df[image_id]["bboxes"] = bboxes_list1
- df[image_id]["texts"] = [tokenizer1.decode(input_ids_dict1[str(bbox)]) for bbox in bboxes_list1]
- df[image_id]["labels"] = [id2label[probs_bbox[str(bbox)]] for bbox in bboxes_list1]
-
- return bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df
-
- else:
- print("An error occurred while getting predictions!")
-
-
-# Get labeled images with lines bounding boxes
-def get_labeled_images(id2label, dataset, images_ids_list, bboxes_list_dict, probs_dict_dict):
-
- labeled_images = list()
-
- for i, image_id in enumerate(images_ids_list):
-
- # get image
- images_list = dataset.filter(lambda example: example["images_ids"] == image_id)["images"]
- image = images_list[0]
- width, height = image.size
-
- # get predicted boxes and labels
- bboxes_list = bboxes_list_dict[image_id]
- probs_bbox = probs_dict_dict[image_id]
-
- draw = ImageDraw.Draw(image)
- # https://stackoverflow.com/questions/66274858/choosing-a-pil-imagefont-by-font-name-rather-than-filename-and-cross-platform-f
- font = font_manager.FontProperties(family='sans-serif', weight='bold')
- font_file = font_manager.findfont(font)
- font_size = 30
- font = ImageFont.truetype(font_file, font_size)
-
- for bbox in bboxes_list:
- predicted_label = id2label[probs_bbox[str(bbox)]]
- draw.rectangle(bbox, outline=label2color[predicted_label])
- draw.text((bbox[0] + 10, bbox[1] - font_size), text=predicted_label, fill=label2color[predicted_label], font=font)
-
- labeled_images.append(image)
-
- return labeled_images
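-
-# End-to-end sketch (assumed wiring; the actual Gradio callbacks are outside this excerpt):
-# from an uploaded PDF to labeled page images, using the functions defined above.
-# `id2label` is assumed to come from the fine-tuned models' config (model1.config.id2label).
-def _demo_pipeline(uploaded_pdf, id2label):
-    filename, msg, images = pdf_to_images(uploaded_pdf)
-    dataset, *_ = extraction_data_from_image(images)
-    # encode once per model (LiLT and LayoutXLM use different tokenizers and sep boxes)
-    encoded1 = dataset.map(partial(prepare_inference_features_paragraph, tokenizer=tokenizer1,
-                                   max_length=max_length, cls_box=cls_box1, sep_box=sep_box1),
-                           batched=True, batch_size=1, remove_columns=dataset.column_names)
-    encoded2 = dataset.map(partial(prepare_inference_features_paragraph, tokenizer=tokenizer2,
-                                   max_length=max_length, cls_box=cls_box2, sep_box=sep_box2),
-                           batched=True, batch_size=1, remove_columns=dataset.column_names)
-    outputs1, ids1, chunks1, inputs1, boxes1 = predictions_token_level(images, CustomDataset(encoded1, tokenizer1), model_id1, model1)
-    outputs2, ids2, chunks2, inputs2, boxes2 = predictions_token_level(images, CustomDataset(encoded2, tokenizer2), model_id2, model2)
-    bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = predictions_paragraph_level(
-        max_length, tokenizer1, id2label, dataset, outputs1, ids1, chunks1, inputs1, boxes1, cls_box1, sep_box1,
-        tokenizer2, outputs2, ids2, chunks2, inputs2, boxes2, cls_box2, sep_box2)
-    return get_labeled_images(id2label, dataset, ids1, bboxes_list_dict, probs_dict_dict)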
-
-# get data of encoded chunk
-def get_encoded_chunk_inference(tokenizer, dataset, encoded_dataset, index_chunk=None):
-
- # get datasets
- example = dataset
- encoded_example = encoded_dataset
-
-    # randomly pick a chunk from the encoded dataset
-    if index_chunk is None: index_chunk = random.randint(0, len(encoded_example)-1)
- encoded_example = encoded_example[index_chunk]
- encoded_image_ids = encoded_example["images_ids"]
-
- # get the image
- example = example.filter(lambda example: example["images_ids"] == encoded_image_ids)[0]
- image = example["images"] # original image
- width, height = image.size
- page_no = example["page_no"]
- num_pages = example["num_pages"]
-
- # get boxes, texts, categories
- bboxes, input_ids = encoded_example["normalized_bboxes"][1:-1], encoded_example["input_ids"][1:-1]
- bboxes = [denormalize_box(bbox, width, height) for bbox in bboxes]
- num_tokens = len(input_ids) + 2
-
- # get unique bboxes and corresponding labels
- bboxes_list, input_ids_list = list(), list()
- input_ids_dict = dict()
- bbox_prev = [-100, -100, -100, -100]
- for i, (bbox, input_id) in enumerate(zip(bboxes, input_ids)):
- if bbox != bbox_prev:
- bboxes_list.append(bbox)
- input_ids_dict[str(bbox)] = [input_id]
- else:
- input_ids_dict[str(bbox)].append(input_id)
-
- # start_indexes_list.append(i)
- bbox_prev = bbox
-
- # do not keep "..."
- if input_ids_dict[str(bboxes_list[-1])][0] == (tokenizer.convert_tokens_to_ids('')):
- del input_ids_dict[str(bboxes_list[-1])]
- bboxes_list = bboxes_list[:-1]
-
- # get texts by line
- input_ids_list = input_ids_dict.values()
- texts_list = [tokenizer.decode(input_ids) for input_ids in input_ids_list]
-
- # display DataFrame
- df = pd.DataFrame({"texts": texts_list, "input_ids": input_ids_list, "bboxes": bboxes_list})
-
- return image, df, num_tokens, page_no, num_pages
-
-# display chunk of PDF image and its data
-def display_chunk_lines_inference(tokenizer, dataset, encoded_dataset, index_chunk=None):
-
- # get image and image data
-    image, df, num_tokens, page_no, num_pages = get_encoded_chunk_inference(tokenizer, dataset, encoded_dataset, index_chunk=index_chunk)
-
- # get data from dataframe
- input_ids = df["input_ids"]
- texts = df["texts"]
- bboxes = df["bboxes"]
-
- print(f'Chunk ({num_tokens} tokens) of the PDF (page: {page_no+1} / {num_pages})\n')
-
- # display image with bounding boxes
- print(">> PDF image with bounding boxes of lines\n")
- draw = ImageDraw.Draw(image)
-
- labels = list()
- for box, text in zip(bboxes, texts):
- color = "red"
- draw.rectangle(box, outline=color)
-
- # resize image to original
- width, height = image.size
- image = image.resize((int(0.5*width), int(0.5*height)))
-
- # convert to cv and display
- img = np.array(image, dtype='uint8') # PIL to cv2
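-    # cv2_imshow and display are assumed to come from the notebook environment
-    # (google.colab.patches / IPython.display); they are not imported in this excerpt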
- cv2_imshow(img)
- cv2.waitKey(0)
-
- # display image dataframe
- print("\n>> Dataframe of annotated lines\n")
- cols = ["texts", "bboxes"]
- df = df[cols]
- display(df)
-
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
deleted file mode 100644
index 43f6e144f677a113b5362dcbdfb75db4f41c2b2f..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-Script which takes one or more file paths and reports on their detected
-encodings
-
-Example::
-
- % chardetect somefile someotherfile
- somefile: windows-1252 with confidence 0.5
- someotherfile: ascii with confidence 1.0
-
-If no paths are provided, it takes its input from stdin.
-
-"""
-
-
-import argparse
-import sys
-from typing import Iterable, List, Optional
-
-from .. import __version__
-from ..universaldetector import UniversalDetector
-
-
-def description_of(
- lines: Iterable[bytes],
- name: str = "stdin",
- minimal: bool = False,
- should_rename_legacy: bool = False,
-) -> Optional[str]:
- """
- Return a string describing the probable encoding of a file or
- list of strings.
-
- :param lines: The lines to get the encoding of.
- :type lines: Iterable of bytes
- :param name: Name of file or collection of lines
- :type name: str
- :param should_rename_legacy: Should we rename legacy encodings to
- their more modern equivalents?
- :type should_rename_legacy: ``bool``
- """
- u = UniversalDetector(should_rename_legacy=should_rename_legacy)
- for line in lines:
- line = bytearray(line)
- u.feed(line)
- # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
- if u.done:
- break
- u.close()
- result = u.result
- if minimal:
- return result["encoding"]
- if result["encoding"]:
- return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
- return f"{name}: no result"
-
-
-def main(argv: Optional[List[str]] = None) -> None:
- """
- Handles command line arguments and gets things started.
-
- :param argv: List of arguments, as if specified on the command-line.
- If None, ``sys.argv[1:]`` is used instead.
- :type argv: list of str
- """
- # Get command line arguments
- parser = argparse.ArgumentParser(
- description=(
- "Takes one or more file paths and reports their detected encodings"
- )
- )
- parser.add_argument(
- "input",
- help="File whose encoding we would like to determine. (default: stdin)",
- type=argparse.FileType("rb"),
- nargs="*",
- default=[sys.stdin.buffer],
- )
- parser.add_argument(
- "--minimal",
- help="Print only the encoding to standard output",
- action="store_true",
- )
- parser.add_argument(
- "-l",
- "--legacy",
- help="Rename legacy encodings to more modern ones.",
- action="store_true",
- )
- parser.add_argument(
- "--version", action="version", version=f"%(prog)s {__version__}"
- )
- args = parser.parse_args(argv)
-
- for f in args.input:
- if f.isatty():
- print(
- "You are running chardetect interactively. Press "
- "CTRL-D twice at the start of a blank line to signal the "
- "end of your input. If you want help, run chardetect "
- "--help\n",
- file=sys.stderr,
- )
- print(
- description_of(
- f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
- )
- )
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py
deleted file mode 100644
index 91cd0db31c14e30d4c1e2e9f36382b7a5e022870..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from typing import TYPE_CHECKING
-
-from .measure import Measurement
-from .segment import Segment
-from .style import StyleType
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult, RenderableType
-
-
-class Styled:
- """Apply a style to a renderable.
-
- Args:
- renderable (RenderableType): Any renderable.
- style (StyleType): A style to apply across the entire renderable.
- """
-
- def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
- self.renderable = renderable
- self.style = style
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- style = console.get_style(self.style)
- rendered_segments = console.render(self.renderable, options)
- segments = Segment.apply_style(rendered_segments, style)
- return segments
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- return Measurement.get(console, options, self.renderable)
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich import print
- from pip._vendor.rich.panel import Panel
-
- panel = Styled(Panel("hello"), "on blue")
- print(panel)
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/stop.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/stop.py
deleted file mode 100644
index bb23effdf865b007756451f61fcbd7635f15b5d5..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/stop.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2016–2021 Julien Danjou
-# Copyright 2016 Joshua Harlow
-# Copyright 2013-2014 Ray Holder
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import abc
-import typing
-
-from pip._vendor.tenacity import _utils
-
-if typing.TYPE_CHECKING:
- import threading
-
- from pip._vendor.tenacity import RetryCallState
-
-
-class stop_base(abc.ABC):
- """Abstract base class for stop strategies."""
-
- @abc.abstractmethod
- def __call__(self, retry_state: "RetryCallState") -> bool:
- pass
-
- def __and__(self, other: "stop_base") -> "stop_all":
- return stop_all(self, other)
-
- def __or__(self, other: "stop_base") -> "stop_any":
- return stop_any(self, other)
-
-
-StopBaseT = typing.Union[stop_base, typing.Callable[["RetryCallState"], bool]]
-
-
-class stop_any(stop_base):
- """Stop if any of the stop condition is valid."""
-
- def __init__(self, *stops: stop_base) -> None:
- self.stops = stops
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- return any(x(retry_state) for x in self.stops)
-
-
-class stop_all(stop_base):
- """Stop if all the stop conditions are valid."""
-
- def __init__(self, *stops: stop_base) -> None:
- self.stops = stops
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- return all(x(retry_state) for x in self.stops)
-
-
-class _stop_never(stop_base):
- """Never stop."""
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- return False
-
-
-stop_never = _stop_never()
-
-
-class stop_when_event_set(stop_base):
- """Stop when the given event is set."""
-
- def __init__(self, event: "threading.Event") -> None:
- self.event = event
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- return self.event.is_set()
-
-
-class stop_after_attempt(stop_base):
- """Stop when the previous attempt >= max_attempt."""
-
- def __init__(self, max_attempt_number: int) -> None:
- self.max_attempt_number = max_attempt_number
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- return retry_state.attempt_number >= self.max_attempt_number
-
-
-class stop_after_delay(stop_base):
- """Stop when the time from the first attempt >= limit."""
-
- def __init__(self, max_delay: _utils.time_unit_type) -> None:
- self.max_delay = _utils.to_seconds(max_delay)
-
- def __call__(self, retry_state: "RetryCallState") -> bool:
- if retry_state.seconds_since_start is None:
- raise RuntimeError("__call__() called but seconds_since_start is not set")
- return retry_state.seconds_since_start >= self.max_delay
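-
-
-# Illustrative sketch (not part of this module): stop conditions compose with "|" (any) and
-# "&" (all); e.g. retry until either 5 attempts have been made or 10 seconds have elapsed:
-#
-#     from pip._vendor.tenacity import retry, stop_after_attempt, stop_after_delay
-#
-#     @retry(stop=stop_after_attempt(5) | stop_after_delay(10))
-#     def flaky_call():
-#         ...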
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_normalization.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_normalization.py
deleted file mode 100644
index 31899f7ab1d62098a861c82992c980c1e2b1b58f..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_normalization.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Helpers for normalization as expected in wheel/sdist/module file names
-and core metadata
-"""
-import re
-from pathlib import Path
-from typing import Union
-
-from .extern import packaging
-from .warnings import SetuptoolsDeprecationWarning
-
-_Path = Union[str, Path]
-
-# https://packaging.python.org/en/latest/specifications/core-metadata/#name
-_VALID_NAME = re.compile(r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.I)
-_UNSAFE_NAME_CHARS = re.compile(r"[^A-Z0-9.]+", re.I)
-
-
-def safe_identifier(name: str) -> str:
- """Make a string safe to be used as Python identifier.
- >>> safe_identifier("12abc")
- '_12abc'
- >>> safe_identifier("__editable__.myns.pkg-78.9.3_local")
- '__editable___myns_pkg_78_9_3_local'
- """
- safe = re.sub(r'\W|^(?=\d)', '_', name)
- assert safe.isidentifier()
- return safe
-
-
-def safe_name(component: str) -> str:
- """Escape a component used as a project name according to Core Metadata.
- >>> safe_name("hello world")
- 'hello-world'
- >>> safe_name("hello?world")
- 'hello-world'
- """
- # See pkg_resources.safe_name
- return _UNSAFE_NAME_CHARS.sub("-", component)
-
-
-def safe_version(version: str) -> str:
- """Convert an arbitrary string into a valid version string.
- >>> safe_version("1988 12 25")
- '1988.12.25'
- >>> safe_version("v0.2.1")
- '0.2.1'
- >>> safe_version("v0.2?beta")
- '0.2b0'
- >>> safe_version("v0.2 beta")
- '0.2b0'
- >>> safe_version("ubuntu lts")
- Traceback (most recent call last):
- ...
- setuptools.extern.packaging.version.InvalidVersion: Invalid version: 'ubuntu.lts'
- """
- v = version.replace(' ', '.')
- try:
- return str(packaging.version.Version(v))
- except packaging.version.InvalidVersion:
- attempt = _UNSAFE_NAME_CHARS.sub("-", v)
- return str(packaging.version.Version(attempt))
-
-
-def best_effort_version(version: str) -> str:
- """Convert an arbitrary string into a version-like string.
- >>> best_effort_version("v0.2 beta")
- '0.2b0'
-
- >>> import warnings
- >>> warnings.simplefilter("ignore", category=SetuptoolsDeprecationWarning)
- >>> best_effort_version("ubuntu lts")
- 'ubuntu.lts'
- """
- # See pkg_resources.safe_version
- try:
- return safe_version(version)
- except packaging.version.InvalidVersion:
- SetuptoolsDeprecationWarning.emit(
- f"Invalid version: {version!r}.",
- f"""
- Version {version!r} is not valid according to PEP 440.
-
- Please make sure to specify a valid version for your package.
- Also note that future releases of setuptools may halt the build process
- if an invalid version is given.
- """,
- see_url="https://peps.python.org/pep-0440/",
- due_date=(2023, 9, 26), # See setuptools/dist _validate_version
- )
- v = version.replace(' ', '.')
- return safe_name(v)
-
-
-def filename_component(value: str) -> str:
- """Normalize each component of a filename (e.g. distribution/version part of wheel)
- Note: ``value`` needs to be already normalized.
- >>> filename_component("my-pkg")
- 'my_pkg'
- """
- return value.replace("-", "_").strip("_")
-
-
-def safer_name(value: str) -> str:
- """Like ``safe_name`` but can be used as filename component for wheel"""
- # See bdist_wheel.safer_name
- return filename_component(safe_name(value))
-
-
-def safer_best_effort_version(value: str) -> str:
- """Like ``best_effort_version`` but can be used as filename component for wheel"""
- # See bdist_wheel.safer_verion
- # TODO: Replace with only safe_version in the future (no need for best effort)
- return filename_component(best_effort_version(value))
diff --git a/spaces/pomudachi/spoiled-brrats/Dockerfile b/spaces/pomudachi/spoiled-brrats/Dockerfile
deleted file mode 100644
index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000
--- a/spaces/pomudachi/spoiled-brrats/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18-bullseye-slim
-RUN apt-get update && \
- apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
diff --git a/spaces/prairie-guy/Art_Mood/README.md b/spaces/prairie-guy/Art_Mood/README.md
deleted file mode 100644
index 136b23e51ed442bdad76646cc5d6c0b1540bb878..0000000000000000000000000000000000000000
--- a/spaces/prairie-guy/Art_Mood/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Art_Mood
-emoji: 🏃
-colorFrom: red
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/prateekagrawal/roberta-testing/apps/about.py b/spaces/prateekagrawal/roberta-testing/apps/about.py
deleted file mode 100644
index c12ee2c6e9d52ff1216e0b1bd5cac8d9468f793b..0000000000000000000000000000000000000000
--- a/spaces/prateekagrawal/roberta-testing/apps/about.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import streamlit as st
-
-
-def app():
- # st.title("About")
- st.markdown("
About
", unsafe_allow_html=True)
- st.markdown("""## Introduction""")
- st.markdown(
- """**RoBERTa-hindi** is one of the many projects in the Flax/JAX community week organized by HuggingFace in collaboration with Google to make compute-intensive projects more practicable."""
- )
- st.markdown(
- """It is a monolingual transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts."""
- )
-
- st.markdown("""## Datasets used""")
- st.markdown(
- """RoBERTa-Hindi has been pretrained on a huge corpus consisting of multiple datasets. The entire list of datasets used is mentioned below : """
- )
- st.markdown(
- """
- 1. OSCAR
- 2. mC4
- 3. Indic-glue
- 4. Hindi-wikipedia-articles-172k
- 5. Hindi-text-short-summarization corpus
- 6. Hindi-text-short-and-large-summarization corpus
- 7. Oldnewspaperhindi
- 8. Samanantar
- """
- )
-
- st.markdown(
- """
- ***NOTE: Some of the datasets are readily available on the HuggingFace Datasets while the team developed the rest as per the docs.***
- """
- )
-
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_next_gen.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_next_gen.py
deleted file mode 100644
index 8f7c0b9a46b7a0ee008f94b8054baf5807df043a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_next_gen.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-"""
-These are keyword-only APIs that call `attr.s` and `attr.ib` with different
-default values.
-"""
-
-
-from functools import partial
-
-from . import setters
-from ._funcs import asdict as _asdict
-from ._funcs import astuple as _astuple
-from ._make import (
- NOTHING,
- _frozen_setattrs,
- _ng_default_on_setattr,
- attrib,
- attrs,
-)
-from .exceptions import UnannotatedAttributeError
-
-
-def define(
- maybe_cls=None,
- *,
- these=None,
- repr=None,
- unsafe_hash=None,
- hash=None,
- init=None,
- slots=True,
- frozen=False,
- weakref_slot=True,
- str=False,
- auto_attribs=None,
- kw_only=False,
- cache_hash=False,
- auto_exc=True,
- eq=None,
- order=False,
- auto_detect=True,
- getstate_setstate=None,
- on_setattr=None,
- field_transformer=None,
- match_args=True,
-):
- r"""
- Define an *attrs* class.
-
- Differences to the classic `attr.s` that it uses underneath:
-
- - Automatically detect whether or not *auto_attribs* should be `True` (c.f.
- *auto_attribs* parameter).
- - If *frozen* is `False`, run converters and validators when setting an
- attribute by default.
- - *slots=True*
-
- .. caution::
-
- Usually this has only upsides and few visible effects in everyday
-        programming. But it *can* lead to some surprising behaviors, so please
- make sure to read :term:`slotted classes`.
- - *auto_exc=True*
- - *auto_detect=True*
- - *order=False*
- - Some options that were only relevant on Python 2 or were kept around for
- backwards-compatibility have been removed.
-
- Please note that these are all defaults and you can change them as you
- wish.
-
- :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
- exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
-
- 1. If any attributes are annotated and no unannotated `attrs.fields`\ s
- are found, it assumes *auto_attribs=True*.
- 2. Otherwise it assumes *auto_attribs=False* and tries to collect
- `attrs.fields`\ s.
-
- For now, please refer to `attr.s` for the rest of the parameters.
-
- .. versionadded:: 20.1.0
- .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
- .. versionadded:: 22.2.0
- *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
- """
-
- def do_it(cls, auto_attribs):
- return attrs(
- maybe_cls=cls,
- these=these,
- repr=repr,
- hash=hash,
- unsafe_hash=unsafe_hash,
- init=init,
- slots=slots,
- frozen=frozen,
- weakref_slot=weakref_slot,
- str=str,
- auto_attribs=auto_attribs,
- kw_only=kw_only,
- cache_hash=cache_hash,
- auto_exc=auto_exc,
- eq=eq,
- order=order,
- auto_detect=auto_detect,
- collect_by_mro=True,
- getstate_setstate=getstate_setstate,
- on_setattr=on_setattr,
- field_transformer=field_transformer,
- match_args=match_args,
- )
-
- def wrap(cls):
- """
- Making this a wrapper ensures this code runs during class creation.
-
- We also ensure that frozen-ness of classes is inherited.
- """
- nonlocal frozen, on_setattr
-
- had_on_setattr = on_setattr not in (None, setters.NO_OP)
-
- # By default, mutable classes convert & validate on setattr.
- if frozen is False and on_setattr is None:
- on_setattr = _ng_default_on_setattr
-
- # However, if we subclass a frozen class, we inherit the immutability
- # and disable on_setattr.
- for base_cls in cls.__bases__:
- if base_cls.__setattr__ is _frozen_setattrs:
- if had_on_setattr:
- raise ValueError(
- "Frozen classes can't use on_setattr "
- "(frozen-ness was inherited)."
- )
-
- on_setattr = setters.NO_OP
- break
-
- if auto_attribs is not None:
- return do_it(cls, auto_attribs)
-
- try:
- return do_it(cls, True)
- except UnannotatedAttributeError:
- return do_it(cls, False)
-
- # maybe_cls's type depends on the usage of the decorator. It's a class
- # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
- if maybe_cls is None:
- return wrap
- else:
- return wrap(maybe_cls)
-
-
-mutable = define
-frozen = partial(define, frozen=True, on_setattr=None)
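-
-
-# Illustrative sketch (not part of this module): typical next-gen usage of `define`
-# together with `field` (defined below).
-#
-#     @define
-#     class Point:
-#         x: int
-#         y: int = field(default=0)
-#
-#     Point(1)  # Point(x=1, y=0)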
-
-
-def field(
- *,
- default=NOTHING,
- validator=None,
- repr=True,
- hash=None,
- init=True,
- metadata=None,
- type=None,
- converter=None,
- factory=None,
- kw_only=False,
- eq=None,
- order=None,
- on_setattr=None,
- alias=None,
-):
- """
- Identical to `attr.ib`, except keyword-only and with some arguments
- removed.
-
- .. versionadded:: 23.1.0
- The *type* parameter has been re-added; mostly for
- {func}`attrs.make_class`. Please note that type checkers ignore this
- metadata.
- .. versionadded:: 20.1.0
- """
- return attrib(
- default=default,
- validator=validator,
- repr=repr,
- hash=hash,
- init=init,
- metadata=metadata,
- type=type,
- converter=converter,
- factory=factory,
- kw_only=kw_only,
- eq=eq,
- order=order,
- on_setattr=on_setattr,
- alias=alias,
- )
-
-
-def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
- """
- Same as `attr.asdict`, except that collections types are always retained
- and dict is always used as *dict_factory*.
-
- .. versionadded:: 21.3.0
- """
- return _asdict(
- inst=inst,
- recurse=recurse,
- filter=filter,
- value_serializer=value_serializer,
- retain_collection_types=True,
- )
-
-
-def astuple(inst, *, recurse=True, filter=None):
- """
- Same as `attr.astuple`, except that collections types are always retained
- and `tuple` is always used as the *tuple_factory*.
-
- .. versionadded:: 21.3.0
- """
- return _astuple(
- inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
- )
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/exceptiongroup/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/exceptiongroup/__init__.py
deleted file mode 100644
index 0e7e02bcf3bc0eb65f8001ca5f530b53d293c31c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/exceptiongroup/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-__all__ = [
- "BaseExceptionGroup",
- "ExceptionGroup",
- "catch",
- "format_exception",
- "format_exception_only",
- "print_exception",
- "print_exc",
-]
-
-import os
-import sys
-
-from ._catch import catch
-from ._version import version as __version__ # noqa: F401
-
-if sys.version_info < (3, 11):
- from ._exceptions import BaseExceptionGroup, ExceptionGroup
- from ._formatting import (
- format_exception,
- format_exception_only,
- print_exc,
- print_exception,
- )
-
- if os.getenv("EXCEPTIONGROUP_NO_PATCH") != "1":
- from . import _formatting # noqa: F401
-
- BaseExceptionGroup.__module__ = __name__
- ExceptionGroup.__module__ = __name__
-else:
- from traceback import (
- format_exception,
- format_exception_only,
- print_exc,
- print_exception,
- )
-
- BaseExceptionGroup = BaseExceptionGroup
- ExceptionGroup = ExceptionGroup
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-8f1feca1.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-8f1feca1.css
deleted file mode 100644
index 1b457869043e5e2005c2331cb14abed07b7f6a88..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-8f1feca1.css
+++ /dev/null
@@ -1 +0,0 @@
-span.svelte-s1r2yt{font-weight:var(--section-header-text-weight);font-size:var(--section-header-text-size)}.label-wrap.svelte-s1r2yt{display:flex;justify-content:space-between;cursor:pointer;width:var(--size-full)}.label-wrap.open.svelte-s1r2yt{margin-bottom:var(--size-2)}.icon.svelte-s1r2yt{transition:.15s}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-a83f7316.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-a83f7316.js
deleted file mode 100644
index b14075154e54a24c486032dd2d88607f8246d53f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-a83f7316.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import"./Index-c74a8b7c.js";const{SvelteComponent:l,append:p,attr:e,detach:w,init:d,insert:c,noop:r,safe_not_equal:_,svg_element:s}=window.__gradio__svelte__internal;function g(a){let t,n;return{c(){t=s("svg"),n=s("path"),e(n,"d","M5 8l4 4 4-4z"),e(t,"class","dropdown-arrow svelte-xjn76a"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 18 18")},m(o,i){c(o,t,i),p(t,n)},p:r,i:r,o:r,d(o){o&&w(t)}}}class v extends l{constructor(t){super(),d(this,t,null,g,_,{})}}export{v as D};
-//# sourceMappingURL=DropdownArrow-a83f7316.js.map
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/markers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/markers.py
deleted file mode 100644
index e7096e66bc6ad4112063ba970a4f2952d2d0d92e..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/markers.py
+++ /dev/null
@@ -1,917 +0,0 @@
-r"""
-Functions to handle markers; used by the marker functionality of
-`~matplotlib.axes.Axes.plot`, `~matplotlib.axes.Axes.scatter`, and
-`~matplotlib.axes.Axes.errorbar`.
-
-All possible markers are defined here:
-
-============================== ====== =========================================
-marker symbol description
-============================== ====== =========================================
-``"."`` |m00| point
-``","`` |m01| pixel
-``"o"`` |m02| circle
-``"v"`` |m03| triangle_down
-``"^"`` |m04| triangle_up
-``"<"`` |m05| triangle_left
-``">"`` |m06| triangle_right
-``"1"`` |m07| tri_down
-``"2"`` |m08| tri_up
-``"3"`` |m09| tri_left
-``"4"`` |m10| tri_right
-``"8"`` |m11| octagon
-``"s"`` |m12| square
-``"p"`` |m13| pentagon
-``"P"`` |m23| plus (filled)
-``"*"`` |m14| star
-``"h"`` |m15| hexagon1
-``"H"`` |m16| hexagon2
-``"+"`` |m17| plus
-``"x"`` |m18| x
-``"X"`` |m24| x (filled)
-``"D"`` |m19| diamond
-``"d"`` |m20| thin_diamond
-``"|"`` |m21| vline
-``"_"`` |m22| hline
-``0`` (``TICKLEFT``) |m25| tickleft
-``1`` (``TICKRIGHT``) |m26| tickright
-``2`` (``TICKUP``) |m27| tickup
-``3`` (``TICKDOWN``) |m28| tickdown
-``4`` (``CARETLEFT``) |m29| caretleft
-``5`` (``CARETRIGHT``) |m30| caretright
-``6`` (``CARETUP``) |m31| caretup
-``7`` (``CARETDOWN``) |m32| caretdown
-``8`` (``CARETLEFTBASE``) |m33| caretleft (centered at base)
-``9`` (``CARETRIGHTBASE``) |m34| caretright (centered at base)
-``10`` (``CARETUPBASE``) |m35| caretup (centered at base)
-``11`` (``CARETDOWNBASE``) |m36| caretdown (centered at base)
-``"none"`` or ``"None"`` nothing
-``" "`` or ``""`` nothing
-``'$...$'`` |m37| Render the string using mathtext.
- E.g ``"$f$"`` for marker showing the
- letter ``f``.
-``verts`` A list of (x, y) pairs used for Path
- vertices. The center of the marker is
- located at (0, 0) and the size is
- normalized, such that the created path
- is encapsulated inside the unit cell.
-path A `~matplotlib.path.Path` instance.
-``(numsides, 0, angle)`` A regular polygon with ``numsides``
- sides, rotated by ``angle``.
-``(numsides, 1, angle)`` A star-like symbol with ``numsides``
- sides, rotated by ``angle``.
-``(numsides, 2, angle)`` An asterisk with ``numsides`` sides,
- rotated by ``angle``.
-============================== ====== =========================================
-
-As a deprecated feature, ``None`` also means 'nothing' when directly
-constructing a `.MarkerStyle`, but note that there are other contexts where
-``marker=None`` instead means "the default marker" (e.g. :rc:`scatter.marker`
-for `.Axes.scatter`).
-
-Note that special symbols can be defined via the
-:ref:`STIX math font <mathtext>`,
-e.g. ``"$\u266B$"``. For an overview of the STIX font symbols refer to the
-`STIX font table <http://www.stixfonts.org/allGlyphs.html>`_.
-Also see the :doc:`/gallery/text_labels_and_annotations/stix_fonts_demo`.
-
-Integer numbers from ``0`` to ``11`` create lines and triangles. Those are
-equally accessible via capitalized variables, like ``CARETDOWNBASE``.
-Hence the following are equivalent::
-
- plt.plot([1, 2, 3], marker=11)
- plt.plot([1, 2, 3], marker=matplotlib.markers.CARETDOWNBASE)
-
-Marker join and cap styles can be customized by creating a new instance of
-MarkerStyle.
-A MarkerStyle can also have a custom `~matplotlib.transforms.Transform`
-allowing it to be arbitrarily rotated or offset.
-
-Examples showing the use of markers:
-
-* :doc:`/gallery/lines_bars_and_markers/marker_reference`
-* :doc:`/gallery/lines_bars_and_markers/scatter_star_poly`
-* :doc:`/gallery/lines_bars_and_markers/multivariate_marker_plot`
-
-.. |m00| image:: /_static/markers/m00.png
-.. |m01| image:: /_static/markers/m01.png
-.. |m02| image:: /_static/markers/m02.png
-.. |m03| image:: /_static/markers/m03.png
-.. |m04| image:: /_static/markers/m04.png
-.. |m05| image:: /_static/markers/m05.png
-.. |m06| image:: /_static/markers/m06.png
-.. |m07| image:: /_static/markers/m07.png
-.. |m08| image:: /_static/markers/m08.png
-.. |m09| image:: /_static/markers/m09.png
-.. |m10| image:: /_static/markers/m10.png
-.. |m11| image:: /_static/markers/m11.png
-.. |m12| image:: /_static/markers/m12.png
-.. |m13| image:: /_static/markers/m13.png
-.. |m14| image:: /_static/markers/m14.png
-.. |m15| image:: /_static/markers/m15.png
-.. |m16| image:: /_static/markers/m16.png
-.. |m17| image:: /_static/markers/m17.png
-.. |m18| image:: /_static/markers/m18.png
-.. |m19| image:: /_static/markers/m19.png
-.. |m20| image:: /_static/markers/m20.png
-.. |m21| image:: /_static/markers/m21.png
-.. |m22| image:: /_static/markers/m22.png
-.. |m23| image:: /_static/markers/m23.png
-.. |m24| image:: /_static/markers/m24.png
-.. |m25| image:: /_static/markers/m25.png
-.. |m26| image:: /_static/markers/m26.png
-.. |m27| image:: /_static/markers/m27.png
-.. |m28| image:: /_static/markers/m28.png
-.. |m29| image:: /_static/markers/m29.png
-.. |m30| image:: /_static/markers/m30.png
-.. |m31| image:: /_static/markers/m31.png
-.. |m32| image:: /_static/markers/m32.png
-.. |m33| image:: /_static/markers/m33.png
-.. |m34| image:: /_static/markers/m34.png
-.. |m35| image:: /_static/markers/m35.png
-.. |m36| image:: /_static/markers/m36.png
-.. |m37| image:: /_static/markers/m37.png
-"""
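Illustrative sketch (editorial, not part of the deleted file) of the marker forms documented above; it uses only names described in this docstring:

import matplotlib.pyplot as plt
from matplotlib.markers import MarkerStyle, CARETDOWNBASE

plt.plot([1, 2, 3], marker=CARETDOWNBASE, linestyle="none")  # integer marker via named constant
plt.scatter([1], [1], marker=(5, 1, 30))                     # star-like symbol, 5 sides, rotated 30 degrees
plt.plot([1, 2, 3], [3, 2, 1], linestyle="none", markersize=20,
         marker=MarkerStyle("*", fillstyle="left"))          # customized MarkerStyle instance
plt.show()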
-import copy
-
-from collections.abc import Sized
-
-import numpy as np
-
-import matplotlib as mpl
-from . import _api, cbook
-from .path import Path
-from .transforms import IdentityTransform, Affine2D
-from ._enums import JoinStyle, CapStyle
-
-# special-purpose marker identifiers:
-(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
- CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
- CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = range(12)
-
-_empty_path = Path(np.empty((0, 2)))
-
-
-class MarkerStyle:
- """
- A class representing marker types.
-
- Instances are immutable. If you need to change anything, create a new
- instance.
-
- Attributes
- ----------
- markers : dict
- All known markers.
- filled_markers : tuple
- All known filled markers. This is a subset of *markers*.
- fillstyles : tuple
- The supported fillstyles.
- """
-
- markers = {
- '.': 'point',
- ',': 'pixel',
- 'o': 'circle',
- 'v': 'triangle_down',
- '^': 'triangle_up',
- '<': 'triangle_left',
- '>': 'triangle_right',
- '1': 'tri_down',
- '2': 'tri_up',
- '3': 'tri_left',
- '4': 'tri_right',
- '8': 'octagon',
- 's': 'square',
- 'p': 'pentagon',
- '*': 'star',
- 'h': 'hexagon1',
- 'H': 'hexagon2',
- '+': 'plus',
- 'x': 'x',
- 'D': 'diamond',
- 'd': 'thin_diamond',
- '|': 'vline',
- '_': 'hline',
- 'P': 'plus_filled',
- 'X': 'x_filled',
- TICKLEFT: 'tickleft',
- TICKRIGHT: 'tickright',
- TICKUP: 'tickup',
- TICKDOWN: 'tickdown',
- CARETLEFT: 'caretleft',
- CARETRIGHT: 'caretright',
- CARETUP: 'caretup',
- CARETDOWN: 'caretdown',
- CARETLEFTBASE: 'caretleftbase',
- CARETRIGHTBASE: 'caretrightbase',
- CARETUPBASE: 'caretupbase',
- CARETDOWNBASE: 'caretdownbase',
- "None": 'nothing',
- "none": 'nothing',
- ' ': 'nothing',
- '': 'nothing'
- }
-
- # Just used for informational purposes. is_filled()
- # is calculated in the _set_* functions.
- filled_markers = (
- '.', 'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
- 'P', 'X')
-
- fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
- _half_fillstyles = ('left', 'right', 'bottom', 'top')
-
- def __init__(self, marker,
- fillstyle=None, transform=None, capstyle=None, joinstyle=None):
- """
- Parameters
- ----------
- marker : str, array-like, Path, MarkerStyle, or None
- - Another instance of *MarkerStyle* copies the details of that
- ``marker``.
- - *None* means no marker. This is the deprecated default.
- - For other possible marker values, see the module docstring
- `matplotlib.markers`.
-
- fillstyle : str, default: :rc:`markers.fillstyle`
- One of 'full', 'left', 'right', 'bottom', 'top', 'none'.
-
- transform : transforms.Transform, default: None
- Transform that will be combined with the native transform of the
- marker.
-
- capstyle : `.CapStyle` or %(CapStyle)s, default: None
- Cap style that will override the default cap style of the marker.
-
- joinstyle : `.JoinStyle` or %(JoinStyle)s, default: None
- Join style that will override the default join style of the marker.
- """
- self._marker_function = None
- self._user_transform = transform
- self._user_capstyle = CapStyle(capstyle) if capstyle is not None else None
- self._user_joinstyle = JoinStyle(joinstyle) if joinstyle is not None else None
- self._set_fillstyle(fillstyle)
- self._set_marker(marker)
-
- def _recache(self):
- if self._marker_function is None:
- return
- self._path = _empty_path
- self._transform = IdentityTransform()
- self._alt_path = None
- self._alt_transform = None
- self._snap_threshold = None
- self._joinstyle = JoinStyle.round
- self._capstyle = self._user_capstyle or CapStyle.butt
- # Initial guess: Assume the marker is filled unless the fillstyle is
- # set to 'none'. The marker function will override this for unfilled
- # markers.
- self._filled = self._fillstyle != 'none'
- self._marker_function()
-
- def __bool__(self):
- return bool(len(self._path.vertices))
-
- def is_filled(self):
- return self._filled
-
- def get_fillstyle(self):
- return self._fillstyle
-
- def _set_fillstyle(self, fillstyle):
- """
- Set the fillstyle.
-
- Parameters
- ----------
- fillstyle : {'full', 'left', 'right', 'bottom', 'top', 'none'}
- The part of the marker surface that is colored with
- markerfacecolor.
- """
- if fillstyle is None:
- fillstyle = mpl.rcParams['markers.fillstyle']
- _api.check_in_list(self.fillstyles, fillstyle=fillstyle)
- self._fillstyle = fillstyle
-
- def get_joinstyle(self):
- return self._joinstyle.name
-
- def get_capstyle(self):
- return self._capstyle.name
-
- def get_marker(self):
- return self._marker
-
- def _set_marker(self, marker):
- """
- Set the marker.
-
- Parameters
- ----------
- marker : str, array-like, Path, MarkerStyle, or None, default: None
- - Another instance of *MarkerStyle* copies the details of that
- ``marker``.
- - *None* means no marker.
- - For other possible marker values see the module docstring
- `matplotlib.markers`.
- """
- if isinstance(marker, str) and cbook.is_math_text(marker):
- self._marker_function = self._set_mathtext_path
- elif isinstance(marker, (int, str)) and marker in self.markers:
- self._marker_function = getattr(self, '_set_' + self.markers[marker])
- elif (isinstance(marker, np.ndarray) and marker.ndim == 2 and
- marker.shape[1] == 2):
- self._marker_function = self._set_vertices
- elif isinstance(marker, Path):
- self._marker_function = self._set_path_marker
- elif (isinstance(marker, Sized) and len(marker) in (2, 3) and
- marker[1] in (0, 1, 2)):
- self._marker_function = self._set_tuple_marker
- elif isinstance(marker, MarkerStyle):
- self.__dict__ = copy.deepcopy(marker.__dict__)
- else:
- try:
- Path(marker)
- self._marker_function = self._set_vertices
- except ValueError as err:
- raise ValueError(
- f'Unrecognized marker style {marker!r}') from err
-
- if not isinstance(marker, MarkerStyle):
- self._marker = marker
- self._recache()
-
- def get_path(self):
- """
- Return a `.Path` for the primary part of the marker.
-
- For unfilled markers this is the whole marker, for filled markers,
- this is the area to be drawn with *markerfacecolor*.
- """
- return self._path
-
- def get_transform(self):
- """
- Return the transform to be applied to the `.Path` from
- `MarkerStyle.get_path()`.
- """
- if self._user_transform is None:
- return self._transform.frozen()
- else:
- return (self._transform + self._user_transform).frozen()
-
- def get_alt_path(self):
- """
- Return a `.Path` for the alternate part of the marker.
-
- For unfilled markers, this is *None*; for filled markers, this is the
- area to be drawn with *markerfacecoloralt*.
- """
- return self._alt_path
-
- def get_alt_transform(self):
- """
- Return the transform to be applied to the `.Path` from
- `MarkerStyle.get_alt_path()`.
- """
- if self._user_transform is None:
- return self._alt_transform.frozen()
- else:
- return (self._alt_transform + self._user_transform).frozen()
-
- def get_snap_threshold(self):
- return self._snap_threshold
-
- def get_user_transform(self):
- """Return user supplied part of marker transform."""
- if self._user_transform is not None:
- return self._user_transform.frozen()
-
- def transformed(self, transform: Affine2D):
- """
- Return a new version of this marker with the transform applied.
-
- Parameters
- ----------
- transform : `~matplotlib.transforms.Affine2D`, default: None
- Transform will be combined with current user supplied transform.
- """
- new_marker = MarkerStyle(self)
- if new_marker._user_transform is not None:
- new_marker._user_transform += transform
- else:
- new_marker._user_transform = transform
- return new_marker
-
- def rotated(self, *, deg=None, rad=None):
- """
- Return a new version of this marker rotated by specified angle.
-
- Parameters
- ----------
- deg : float, default: None
- Rotation angle in degrees.
-
- rad : float, default: None
- Rotation angle in radians.
-
- .. note:: You must specify exactly one of deg or rad.
- """
- if deg is None and rad is None:
- raise ValueError('One of deg or rad is required')
- if deg is not None and rad is not None:
- raise ValueError('Only one of deg and rad can be supplied')
- new_marker = MarkerStyle(self)
- if new_marker._user_transform is None:
- new_marker._user_transform = Affine2D()
-
- if deg is not None:
- new_marker._user_transform.rotate_deg(deg)
- if rad is not None:
- new_marker._user_transform.rotate(rad)
-
- return new_marker
-
- def scaled(self, sx, sy=None):
- """
- Return new marker scaled by specified scale factors.
-
- If *sy* is None, the same scale is applied in both the *x*- and
- *y*-directions.
-
- Parameters
- ----------
- sx : float
- *X*-direction scaling factor.
- sy : float, default: None
- *Y*-direction scaling factor.
- """
- if sy is None:
- sy = sx
-
- new_marker = MarkerStyle(self)
- _transform = new_marker._user_transform or Affine2D()
- new_marker._user_transform = _transform.scale(sx, sy)
- return new_marker
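A short sketch (editorial, not part of the deleted file) of the three helpers above; each call leaves the original marker untouched and returns a new MarkerStyle with the extra transform composed into its user transform:

from matplotlib.markers import MarkerStyle

base = MarkerStyle("^")
tilted = base.rotated(deg=45)   # new marker, rotated by 45 degrees
tall = base.scaled(1.0, 2.0)    # new marker, stretched along y
# `base` itself is unchanged; the two results carry independent user transforms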
-
- def _set_nothing(self):
- self._filled = False
-
- def _set_custom_marker(self, path):
- rescale = np.max(np.abs(path.vertices)) # max of x's and y's.
- self._transform = Affine2D().scale(0.5 / rescale)
- self._path = path
-
- def _set_path_marker(self):
- self._set_custom_marker(self._marker)
-
- def _set_vertices(self):
- self._set_custom_marker(Path(self._marker))
-
- def _set_tuple_marker(self):
- marker = self._marker
- if len(marker) == 2:
- numsides, rotation = marker[0], 0.0
- elif len(marker) == 3:
- numsides, rotation = marker[0], marker[2]
- symstyle = marker[1]
- if symstyle == 0:
- self._path = Path.unit_regular_polygon(numsides)
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
- elif symstyle == 1:
- self._path = Path.unit_regular_star(numsides)
- self._joinstyle = self._user_joinstyle or JoinStyle.bevel
- elif symstyle == 2:
- self._path = Path.unit_regular_asterisk(numsides)
- self._filled = False
- self._joinstyle = self._user_joinstyle or JoinStyle.bevel
- else:
- raise ValueError(f"Unexpected tuple marker: {marker}")
- self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
-
- def _set_mathtext_path(self):
- """
- Draw mathtext markers '$...$' using `.TextPath` object.
-
- Submitted by tcb
- """
- from matplotlib.text import TextPath
-
- # again, the properties could be initialised just once outside
- # this function
- text = TextPath(xy=(0, 0), s=self.get_marker(),
- usetex=mpl.rcParams['text.usetex'])
- if len(text.vertices) == 0:
- return
-
- bbox = text.get_extents()
- max_dim = max(bbox.width, bbox.height)
- self._transform = (
- Affine2D()
- .translate(-bbox.xmin + 0.5 * -bbox.width, -bbox.ymin + 0.5 * -bbox.height)
- .scale(1.0 / max_dim))
- self._path = text
- self._snap = False
-
- def _half_fill(self):
- return self.get_fillstyle() in self._half_fillstyles
-
- def _set_circle(self, size=1.0):
- self._transform = Affine2D().scale(0.5 * size)
- self._snap_threshold = np.inf
- if not self._half_fill():
- self._path = Path.unit_circle()
- else:
- self._path = self._alt_path = Path.unit_circle_righthalf()
- fs = self.get_fillstyle()
- self._transform.rotate_deg(
- {'right': 0, 'top': 90, 'left': 180, 'bottom': 270}[fs])
- self._alt_transform = self._transform.frozen().rotate_deg(180.)
-
- def _set_point(self):
- self._set_circle(size=0.5)
-
- def _set_pixel(self):
- self._path = Path.unit_rectangle()
- # Ideally, you'd want -0.5, -0.5 here, but then the snapping
- # algorithm in the Agg backend will round this to a 2x2
- # rectangle from (-1, -1) to (1, 1). By offsetting it
- # slightly, we can force it to be (0, 0) to (1, 1), which both
- # makes it only be a single pixel and places it correctly
- # aligned to 1-width stroking (i.e. the ticks). This hack is
- # the best of a number of bad alternatives, mainly because the
- # backends are not aware of what marker is actually being used
- # beyond just its path data.
- self._transform = Affine2D().translate(-0.49999, -0.49999)
- self._snap_threshold = None
-
- _triangle_path = Path._create_closed([[0, 1], [-1, -1], [1, -1]])
- # Going down halfway looks too small. Golden ratio is too far.
- _triangle_path_u = Path._create_closed([[0, 1], [-3/5, -1/5], [3/5, -1/5]])
- _triangle_path_d = Path._create_closed(
- [[-3/5, -1/5], [3/5, -1/5], [1, -1], [-1, -1]])
- _triangle_path_l = Path._create_closed([[0, 1], [0, -1], [-1, -1]])
- _triangle_path_r = Path._create_closed([[0, 1], [0, -1], [1, -1]])
-
- def _set_triangle(self, rot, skip):
- self._transform = Affine2D().scale(0.5).rotate_deg(rot)
- self._snap_threshold = 5.0
-
- if not self._half_fill():
- self._path = self._triangle_path
- else:
- mpaths = [self._triangle_path_u,
- self._triangle_path_l,
- self._triangle_path_d,
- self._triangle_path_r]
-
- fs = self.get_fillstyle()
- if fs == 'top':
- self._path = mpaths[(0 + skip) % 4]
- self._alt_path = mpaths[(2 + skip) % 4]
- elif fs == 'bottom':
- self._path = mpaths[(2 + skip) % 4]
- self._alt_path = mpaths[(0 + skip) % 4]
- elif fs == 'left':
- self._path = mpaths[(1 + skip) % 4]
- self._alt_path = mpaths[(3 + skip) % 4]
- else:
- self._path = mpaths[(3 + skip) % 4]
- self._alt_path = mpaths[(1 + skip) % 4]
-
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_triangle_up(self):
- return self._set_triangle(0.0, 0)
-
- def _set_triangle_down(self):
- return self._set_triangle(180.0, 2)
-
- def _set_triangle_left(self):
- return self._set_triangle(90.0, 3)
-
- def _set_triangle_right(self):
- return self._set_triangle(270.0, 1)
-
- def _set_square(self):
- self._transform = Affine2D().translate(-0.5, -0.5)
- self._snap_threshold = 2.0
- if not self._half_fill():
- self._path = Path.unit_rectangle()
- else:
- # Build a bottom filled square out of two rectangles, one filled.
- self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
- [0.0, 0.5], [0.0, 0.0]])
- self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
- [0.0, 1.0], [0.0, 0.5]])
- fs = self.get_fillstyle()
- rotate = {'bottom': 0, 'right': 90, 'top': 180, 'left': 270}[fs]
- self._transform.rotate_deg(rotate)
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_diamond(self):
- self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
- self._snap_threshold = 5.0
- if not self._half_fill():
- self._path = Path.unit_rectangle()
- else:
- self._path = Path([[0, 0], [1, 0], [1, 1], [0, 0]])
- self._alt_path = Path([[0, 0], [0, 1], [1, 1], [0, 0]])
- fs = self.get_fillstyle()
- rotate = {'right': 0, 'top': 90, 'left': 180, 'bottom': 270}[fs]
- self._transform.rotate_deg(rotate)
- self._alt_transform = self._transform
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_thin_diamond(self):
- self._set_diamond()
- self._transform.scale(0.6, 1.0)
-
- def _set_pentagon(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 5.0
-
- polypath = Path.unit_regular_polygon(5)
-
- if not self._half_fill():
- self._path = polypath
- else:
- verts = polypath.vertices
- y = (1 + np.sqrt(5)) / 4.
- top = Path(verts[[0, 1, 4, 0]])
- bottom = Path(verts[[1, 2, 3, 4, 1]])
- left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
- right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
- self._path, self._alt_path = {
- 'top': (top, bottom), 'bottom': (bottom, top),
- 'left': (left, right), 'right': (right, left),
- }[self.get_fillstyle()]
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_star(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 5.0
-
- polypath = Path.unit_regular_star(5, innerCircle=0.381966)
-
- if not self._half_fill():
- self._path = polypath
- else:
- verts = polypath.vertices
- top = Path(np.concatenate([verts[0:4], verts[7:10], verts[0:1]]))
- bottom = Path(np.concatenate([verts[3:8], verts[3:4]]))
- left = Path(np.concatenate([verts[0:6], verts[0:1]]))
- right = Path(np.concatenate([verts[0:1], verts[5:10], verts[0:1]]))
- self._path, self._alt_path = {
- 'top': (top, bottom), 'bottom': (bottom, top),
- 'left': (left, right), 'right': (right, left),
- }[self.get_fillstyle()]
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.bevel
-
- def _set_hexagon1(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = None
-
- polypath = Path.unit_regular_polygon(6)
-
- if not self._half_fill():
- self._path = polypath
- else:
- verts = polypath.vertices
- # not drawing inside lines
- x = np.abs(np.cos(5 * np.pi / 6.))
- top = Path(np.concatenate([[(-x, 0)], verts[[1, 0, 5]], [(x, 0)]]))
- bottom = Path(np.concatenate([[(-x, 0)], verts[2:5], [(x, 0)]]))
- left = Path(verts[0:4])
- right = Path(verts[[0, 5, 4, 3]])
- self._path, self._alt_path = {
- 'top': (top, bottom), 'bottom': (bottom, top),
- 'left': (left, right), 'right': (right, left),
- }[self.get_fillstyle()]
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_hexagon2(self):
- self._transform = Affine2D().scale(0.5).rotate_deg(30)
- self._snap_threshold = None
-
- polypath = Path.unit_regular_polygon(6)
-
- if not self._half_fill():
- self._path = polypath
- else:
- verts = polypath.vertices
- # not drawing inside lines
- x, y = np.sqrt(3) / 4, 3 / 4.
- top = Path(verts[[1, 0, 5, 4, 1]])
- bottom = Path(verts[1:5])
- left = Path(np.concatenate([
- [(x, y)], verts[:3], [(-x, -y), (x, y)]]))
- right = Path(np.concatenate([
- [(x, y)], verts[5:2:-1], [(-x, -y)]]))
- self._path, self._alt_path = {
- 'top': (top, bottom), 'bottom': (bottom, top),
- 'left': (left, right), 'right': (right, left),
- }[self.get_fillstyle()]
- self._alt_transform = self._transform
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_octagon(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 5.0
-
- polypath = Path.unit_regular_polygon(8)
-
- if not self._half_fill():
- self._transform.rotate_deg(22.5)
- self._path = polypath
- else:
- x = np.sqrt(2.) / 4.
- self._path = self._alt_path = Path(
- [[0, -1], [0, 1], [-x, 1], [-1, x],
- [-1, -x], [-x, -1], [0, -1]])
- fs = self.get_fillstyle()
- self._transform.rotate_deg(
- {'left': 0, 'bottom': 90, 'right': 180, 'top': 270}[fs])
- self._alt_transform = self._transform.frozen().rotate_deg(180.0)
-
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- _line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
-
- def _set_vline(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._line_marker_path
-
- def _set_hline(self):
- self._set_vline()
- self._transform = self._transform.rotate_deg(90)
-
- _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
-
- def _set_tickleft(self):
- self._transform = Affine2D().scale(-1.0, 1.0)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._tickhoriz_path
-
- def _set_tickright(self):
- self._transform = Affine2D().scale(1.0, 1.0)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._tickhoriz_path
-
- _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
-
- def _set_tickup(self):
- self._transform = Affine2D().scale(1.0, 1.0)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._tickvert_path
-
- def _set_tickdown(self):
- self._transform = Affine2D().scale(1.0, -1.0)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._tickvert_path
-
- _tri_path = Path([[0.0, 0.0], [0.0, -1.0],
- [0.0, 0.0], [0.8, 0.5],
- [0.0, 0.0], [-0.8, 0.5]],
- [Path.MOVETO, Path.LINETO,
- Path.MOVETO, Path.LINETO,
- Path.MOVETO, Path.LINETO])
-
- def _set_tri_down(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 5.0
- self._filled = False
- self._path = self._tri_path
-
- def _set_tri_up(self):
- self._set_tri_down()
- self._transform = self._transform.rotate_deg(180)
-
- def _set_tri_left(self):
- self._set_tri_down()
- self._transform = self._transform.rotate_deg(270)
-
- def _set_tri_right(self):
- self._set_tri_down()
- self._transform = self._transform.rotate_deg(90)
-
- _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
-
- def _set_caretdown(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 3.0
- self._filled = False
- self._path = self._caret_path
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
-
- def _set_caretup(self):
- self._set_caretdown()
- self._transform = self._transform.rotate_deg(180)
-
- def _set_caretleft(self):
- self._set_caretdown()
- self._transform = self._transform.rotate_deg(270)
-
- def _set_caretright(self):
- self._set_caretdown()
- self._transform = self._transform.rotate_deg(90)
-
- _caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])
-
- def _set_caretdownbase(self):
- self._set_caretdown()
- self._path = self._caret_path_base
-
- def _set_caretupbase(self):
- self._set_caretdownbase()
- self._transform = self._transform.rotate_deg(180)
-
- def _set_caretleftbase(self):
- self._set_caretdownbase()
- self._transform = self._transform.rotate_deg(270)
-
- def _set_caretrightbase(self):
- self._set_caretdownbase()
- self._transform = self._transform.rotate_deg(90)
-
- _plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
- [0.0, -1.0], [0.0, 1.0]],
- [Path.MOVETO, Path.LINETO,
- Path.MOVETO, Path.LINETO])
-
- def _set_plus(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 1.0
- self._filled = False
- self._path = self._plus_path
-
- _x_path = Path([[-1.0, -1.0], [1.0, 1.0],
- [-1.0, 1.0], [1.0, -1.0]],
- [Path.MOVETO, Path.LINETO,
- Path.MOVETO, Path.LINETO])
-
- def _set_x(self):
- self._transform = Affine2D().scale(0.5)
- self._snap_threshold = 3.0
- self._filled = False
- self._path = self._x_path
-
- _plus_filled_path = Path._create_closed(np.array([
- (-1, -3), (+1, -3), (+1, -1), (+3, -1), (+3, +1), (+1, +1),
- (+1, +3), (-1, +3), (-1, +1), (-3, +1), (-3, -1), (-1, -1)]) / 6)
- _plus_filled_path_t = Path._create_closed(np.array([
- (+3, 0), (+3, +1), (+1, +1), (+1, +3),
- (-1, +3), (-1, +1), (-3, +1), (-3, 0)]) / 6)
-
- def _set_plus_filled(self):
- self._transform = Affine2D()
- self._snap_threshold = 5.0
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
- if not self._half_fill():
- self._path = self._plus_filled_path
- else:
- # Rotate top half path to support all partitions
- self._path = self._alt_path = self._plus_filled_path_t
- fs = self.get_fillstyle()
- self._transform.rotate_deg(
- {'top': 0, 'left': 90, 'bottom': 180, 'right': 270}[fs])
- self._alt_transform = self._transform.frozen().rotate_deg(180)
-
- _x_filled_path = Path._create_closed(np.array([
- (-1, -2), (0, -1), (+1, -2), (+2, -1), (+1, 0), (+2, +1),
- (+1, +2), (0, +1), (-1, +2), (-2, +1), (-1, 0), (-2, -1)]) / 4)
- _x_filled_path_t = Path._create_closed(np.array([
- (+1, 0), (+2, +1), (+1, +2), (0, +1),
- (-1, +2), (-2, +1), (-1, 0)]) / 4)
-
- def _set_x_filled(self):
- self._transform = Affine2D()
- self._snap_threshold = 5.0
- self._joinstyle = self._user_joinstyle or JoinStyle.miter
- if not self._half_fill():
- self._path = self._x_filled_path
- else:
- # Rotate top half path to support all partitions
- self._path = self._alt_path = self._x_filled_path_t
- fs = self.get_fillstyle()
- self._transform.rotate_deg(
- {'top': 0, 'left': 90, 'bottom': 180, 'right': 270}[fs])
- self._alt_transform = self._transform.frozen().rotate_deg(180)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/scimath.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/scimath.py
deleted file mode 100644
index b7ef0d7109c63cffc7c30f59d97389a4a4a230f7..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/scimath.py
+++ /dev/null
@@ -1,625 +0,0 @@
-"""
-Wrapper functions for more user-friendly calling of certain math functions
-whose output data type differs from the input data type in certain
-domains of the input.
-
-For example, for functions like `log` with branch cuts, the versions in this
-module provide the mathematically valid answers in the complex plane::
-
- >>> import math
- >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
- True
-
-Similarly, `sqrt`, other base logarithms, `power` and trig functions are
-correctly handled. See their respective docstrings for specific examples.
-
-Functions
----------
-
-.. autosummary::
- :toctree: generated/
-
- sqrt
- log
- log2
- logn
- log10
- power
- arccos
- arcsin
- arctanh
-
-"""
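An illustrative contrast (editorial, not part of the deleted file) between these wrappers and their plain numpy counterparts, drawn from the examples in the docstrings below:

import numpy as np

np.sqrt(-1)                  # nan, with a RuntimeWarning
np.emath.sqrt(-1)            # 1j
np.emath.log(-np.exp(1))     # (1+3.141592653589793j)
np.emath.power([-2, 4], 2)   # array([ 4.-0.j, 16.+0.j])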
-import numpy.core.numeric as nx
-import numpy.core.numerictypes as nt
-from numpy.core.numeric import asarray, any
-from numpy.core.overrides import array_function_dispatch
-from numpy.lib.type_check import isreal
-
-
-__all__ = [
- 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
- 'arctanh'
- ]
-
-
-_ln2 = nx.log(2.0)
-
-
-def _tocomplex(arr):
- """Convert its input `arr` to a complex array.
-
- The input is returned as a complex array of the smallest type that will fit
- the original data: types like single, byte, short, etc. become csingle,
- while others become cdouble.
-
- A copy of the input is always made.
-
- Parameters
- ----------
- arr : array
-
- Returns
- -------
- array
- An array with the same input data as the input but in complex form.
-
- Examples
- --------
-
- First, consider an input of type short:
-
- >>> a = np.array([1,2,3],np.short)
-
- >>> ac = np.lib.scimath._tocomplex(a); ac
- array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
-
- >>> ac.dtype
- dtype('complex64')
-
- If the input is of type double, the output is correspondingly of the
- complex double type as well:
-
- >>> b = np.array([1,2,3],np.double)
-
- >>> bc = np.lib.scimath._tocomplex(b); bc
- array([1.+0.j, 2.+0.j, 3.+0.j])
-
- >>> bc.dtype
- dtype('complex128')
-
- Note that even if the input was complex to begin with, a copy is still
- made, since the astype() method always copies:
-
- >>> c = np.array([1,2,3],np.csingle)
-
- >>> cc = np.lib.scimath._tocomplex(c); cc
- array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
-
- >>> c *= 2; c
- array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
-
- >>> cc
- array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
- """
- if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
- nt.ushort, nt.csingle)):
- return arr.astype(nt.csingle)
- else:
- return arr.astype(nt.cdouble)
-
-
-def _fix_real_lt_zero(x):
- """Convert `x` to complex if it has real, negative components.
-
- Otherwise, output is just the array version of the input (via asarray).
-
- Parameters
- ----------
- x : array_like
-
- Returns
- -------
- array
-
- Examples
- --------
- >>> np.lib.scimath._fix_real_lt_zero([1,2])
- array([1, 2])
-
- >>> np.lib.scimath._fix_real_lt_zero([-1,2])
- array([-1.+0.j, 2.+0.j])
-
- """
- x = asarray(x)
- if any(isreal(x) & (x < 0)):
- x = _tocomplex(x)
- return x
-
-
-def _fix_int_lt_zero(x):
- """Convert `x` to double if it has real, negative components.
-
- Otherwise, output is just the array version of the input (via asarray).
-
- Parameters
- ----------
- x : array_like
-
- Returns
- -------
- array
-
- Examples
- --------
- >>> np.lib.scimath._fix_int_lt_zero([1,2])
- array([1, 2])
-
- >>> np.lib.scimath._fix_int_lt_zero([-1,2])
- array([-1., 2.])
- """
- x = asarray(x)
- if any(isreal(x) & (x < 0)):
- x = x * 1.0
- return x
-
-
-def _fix_real_abs_gt_1(x):
- """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
-
- Otherwise, output is just the array version of the input (via asarray).
-
- Parameters
- ----------
- x : array_like
-
- Returns
- -------
- array
-
- Examples
- --------
- >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
- array([0, 1])
-
- >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
- array([0.+0.j, 2.+0.j])
- """
- x = asarray(x)
- if any(isreal(x) & (abs(x) > 1)):
- x = _tocomplex(x)
- return x
-
-
-def _unary_dispatcher(x):
- return (x,)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def sqrt(x):
- """
- Compute the square root of x.
-
- For negative input elements, a complex value is returned
- (unlike `numpy.sqrt` which returns NaN).
-
- Parameters
- ----------
- x : array_like
- The input value(s).
-
- Returns
- -------
- out : ndarray or scalar
- The square root of `x`. If `x` was a scalar, so is `out`,
- otherwise an array is returned.
-
- See Also
- --------
- numpy.sqrt
-
- Examples
- --------
- For real, non-negative inputs this works just like `numpy.sqrt`:
-
- >>> np.emath.sqrt(1)
- 1.0
- >>> np.emath.sqrt([1, 4])
- array([1., 2.])
-
- But it automatically handles negative inputs:
-
- >>> np.emath.sqrt(-1)
- 1j
- >>> np.emath.sqrt([-1,4])
- array([0.+1.j, 2.+0.j])
-
- Different results are expected because floating point 0.0 and -0.0
- are distinct.
-
- For more control, explicitly use complex() as follows:
-
- >>> np.emath.sqrt(complex(-4.0, 0.0))
- 2j
- >>> np.emath.sqrt(complex(-4.0, -0.0))
- -2j
- """
- x = _fix_real_lt_zero(x)
- return nx.sqrt(x)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def log(x):
- """
- Compute the natural logarithm of `x`.
-
- Return the "principal value" (for a description of this, see `numpy.log`)
- of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
- returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
- complex principle value is returned.
- complex principal value is returned.
- Parameters
- ----------
- x : array_like
- The value(s) whose log is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The log of the `x` value(s). If `x` was a scalar, so is `out`,
- otherwise an array is returned.
-
- See Also
- --------
- numpy.log
-
- Notes
- -----
- For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
- (note, however, that otherwise `numpy.log` and this `log` are identical,
- i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
- notably, the complex principal value if ``x.imag != 0``).
-
- Examples
- --------
- >>> np.emath.log(np.exp(1))
- 1.0
-
- Negative arguments are handled "correctly" (recall that
- ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
-
- >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
- True
-
- """
- x = _fix_real_lt_zero(x)
- return nx.log(x)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def log10(x):
- """
- Compute the logarithm base 10 of `x`.
-
- Return the "principal value" (for a description of this, see
- `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
- is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
- returns ``inf``). Otherwise, the complex principal value is returned.
-
- Parameters
- ----------
- x : array_like or scalar
- The value(s) whose log base 10 is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
- otherwise an array object is returned.
-
- See Also
- --------
- numpy.log10
-
- Notes
- -----
- For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
- (note, however, that otherwise `numpy.log10` and this `log10` are
- identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
- and, notably, the complex principal value if ``x.imag != 0``).
-
- Examples
- --------
-
- (We set the printing precision so the example can be auto-tested)
-
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.log10(10**1)
- 1.0
-
- >>> np.emath.log10([-10**1, -10**2, 10**2])
- array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
-
- """
- x = _fix_real_lt_zero(x)
- return nx.log10(x)
-
-
-def _logn_dispatcher(n, x):
- return (n, x,)
-
-
-@array_function_dispatch(_logn_dispatcher)
-def logn(n, x):
- """
- Take log base n of x.
-
- If `x` contains negative inputs, the answer is computed and returned in the
- complex domain.
-
- Parameters
- ----------
- n : array_like
- The integer base(s) in which the log is taken.
- x : array_like
- The value(s) whose log base `n` is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The log base `n` of the `x` value(s). If `x` was a scalar, so is
- `out`, otherwise an array is returned.
-
- Examples
- --------
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.logn(2, [4, 8])
- array([2., 3.])
- >>> np.emath.logn(2, [-4, -8, 8])
- array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
-
- """
- x = _fix_real_lt_zero(x)
- n = _fix_real_lt_zero(n)
- return nx.log(x)/nx.log(n)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def log2(x):
- """
- Compute the logarithm base 2 of `x`.
-
- Return the "principal value" (for a description of this, see
- `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
- a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
- ``inf``). Otherwise, the complex principal value is returned.
-
- Parameters
- ----------
- x : array_like
- The value(s) whose log base 2 is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
- otherwise an array is returned.
-
- See Also
- --------
- numpy.log2
-
- Notes
- -----
- For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
- (note, however, that otherwise `numpy.log2` and this `log2` are
- identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
- and, notably, the complex principal value if ``x.imag != 0``).
-
- Examples
- --------
- We set the printing precision so the example can be auto-tested:
-
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.log2(8)
- 3.0
- >>> np.emath.log2([-4, -8, 8])
- array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
-
- """
- x = _fix_real_lt_zero(x)
- return nx.log2(x)
-
-
-def _power_dispatcher(x, p):
- return (x, p)
-
-
-@array_function_dispatch(_power_dispatcher)
-def power(x, p):
- """
- Return x to the power p, (x**p).
-
- If `x` contains negative values, the output is converted to the
- complex domain.
-
- Parameters
- ----------
- x : array_like
- The input value(s).
- p : array_like of ints
- The power(s) to which `x` is raised. If `x` contains multiple values,
- `p` has to either be a scalar, or contain the same number of values
- as `x`. In the latter case, the result is
- ``x[0]**p[0], x[1]**p[1], ...``.
-
- Returns
- -------
- out : ndarray or scalar
- The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
- otherwise an array is returned.
-
- See Also
- --------
- numpy.power
-
- Examples
- --------
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.power([2, 4], 2)
- array([ 4, 16])
- >>> np.emath.power([2, 4], -2)
- array([0.25 , 0.0625])
- >>> np.emath.power([-2, 4], 2)
- array([ 4.-0.j, 16.+0.j])
-
- """
- x = _fix_real_lt_zero(x)
- p = _fix_int_lt_zero(p)
- return nx.power(x, p)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def arccos(x):
- """
- Compute the inverse cosine of x.
-
- Return the "principal value" (for a description of this, see
- `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
- `abs(x) <= 1`, this is a real number in the closed interval
- :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
-
- Parameters
- ----------
- x : array_like or scalar
- The value(s) whose arccos is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
- is `out`, otherwise an array object is returned.
-
- See Also
- --------
- numpy.arccos
-
- Notes
- -----
- For an arccos() that returns ``NAN`` when real `x` is not in the
- interval ``[-1,1]``, use `numpy.arccos`.
-
- Examples
- --------
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.arccos(1) # a scalar is returned
- 0.0
-
- >>> np.emath.arccos([1,2])
- array([0.-0.j , 0.-1.317j])
-
- """
- x = _fix_real_abs_gt_1(x)
- return nx.arccos(x)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def arcsin(x):
- """
- Compute the inverse sine of x.
-
- Return the "principal value" (for a description of this, see
- `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
- `abs(x) <= 1`, this is a real number in the closed interval
- :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
- returned.
-
- Parameters
- ----------
- x : array_like or scalar
- The value(s) whose arcsin is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
- is `out`, otherwise an array object is returned.
-
- See Also
- --------
- numpy.arcsin
-
- Notes
- -----
- For an arcsin() that returns ``NAN`` when real `x` is not in the
- interval ``[-1,1]``, use `numpy.arcsin`.
-
- Examples
- --------
- >>> np.set_printoptions(precision=4)
-
- >>> np.emath.arcsin(0)
- 0.0
-
- >>> np.emath.arcsin([0,1])
- array([0. , 1.5708])
-
- """
- x = _fix_real_abs_gt_1(x)
- return nx.arcsin(x)
-
-
-@array_function_dispatch(_unary_dispatcher)
-def arctanh(x):
- """
- Compute the inverse hyperbolic tangent of `x`.
-
- Return the "principal value" (for a description of this, see
- `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
- ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
- complex, the result is complex. Finally, `x = 1` returns ``inf`` and
- ``x=-1`` returns ``-inf``.
-
- Parameters
- ----------
- x : array_like
- The value(s) whose arctanh is (are) required.
-
- Returns
- -------
- out : ndarray or scalar
- The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
- a scalar so is `out`, otherwise an array is returned.
-
-
- See Also
- --------
- numpy.arctanh
-
- Notes
- -----
- For an arctanh() that returns ``NAN`` when real `x` is not in the
- interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
- return +/-inf for ``x = +/-1``).
-
- Examples
- --------
- >>> np.set_printoptions(precision=4)
-
- >>> from numpy.testing import suppress_warnings
- >>> with suppress_warnings() as sup:
- ... sup.filter(RuntimeWarning)
- ... np.emath.arctanh(np.eye(2))
- array([[inf, 0.],
- [ 0., inf]])
- >>> np.emath.arctanh([1j])
- array([0.+0.7854j])
-
- """
- x = _fix_real_abs_gt_1(x)
- return nx.arctanh(x)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/interval.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/interval.py
deleted file mode 100644
index e8b3676e71ae0de4946ec57bd2ea0a62012f95e9..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/interval.py
+++ /dev/null
@@ -1,1154 +0,0 @@
-""" define the IntervalIndex """
-from __future__ import annotations
-
-from operator import (
- le,
- lt,
-)
-import textwrap
-from typing import (
- TYPE_CHECKING,
- Any,
- Literal,
-)
-
-import numpy as np
-
-from pandas._libs import lib
-from pandas._libs.interval import (
- Interval,
- IntervalMixin,
- IntervalTree,
-)
-from pandas._libs.tslibs import (
- BaseOffset,
- Timedelta,
- Timestamp,
- to_offset,
-)
-from pandas.errors import InvalidIndexError
-from pandas.util._decorators import (
- Appender,
- cache_readonly,
-)
-from pandas.util._exceptions import rewrite_exception
-
-from pandas.core.dtypes.cast import (
- find_common_type,
- infer_dtype_from_scalar,
- maybe_box_datetimelike,
- maybe_downcast_numeric,
- maybe_upcast_numeric_to_64bit,
-)
-from pandas.core.dtypes.common import (
- ensure_platform_int,
- is_float,
- is_float_dtype,
- is_integer,
- is_integer_dtype,
- is_list_like,
- is_number,
- is_object_dtype,
- is_scalar,
- pandas_dtype,
-)
-from pandas.core.dtypes.dtypes import (
- DatetimeTZDtype,
- IntervalDtype,
-)
-from pandas.core.dtypes.missing import is_valid_na_for_dtype
-
-from pandas.core.algorithms import unique
-from pandas.core.arrays.interval import (
- IntervalArray,
- _interval_shared_docs,
-)
-import pandas.core.common as com
-from pandas.core.indexers import is_valid_positional_slice
-import pandas.core.indexes.base as ibase
-from pandas.core.indexes.base import (
- Index,
- _index_shared_docs,
- ensure_index,
- maybe_extract_name,
-)
-from pandas.core.indexes.datetimes import (
- DatetimeIndex,
- date_range,
-)
-from pandas.core.indexes.extension import (
- ExtensionIndex,
- inherit_names,
-)
-from pandas.core.indexes.multi import MultiIndex
-from pandas.core.indexes.timedeltas import (
- TimedeltaIndex,
- timedelta_range,
-)
-
-if TYPE_CHECKING:
- from collections.abc import Hashable
-
- from pandas._typing import (
- Dtype,
- DtypeObj,
- IntervalClosedType,
- npt,
- )
-_index_doc_kwargs = dict(ibase._index_doc_kwargs)
-
-_index_doc_kwargs.update(
- {
- "klass": "IntervalIndex",
- "qualname": "IntervalIndex",
- "target_klass": "IntervalIndex or list of Intervals",
- "name": textwrap.dedent(
- """\
- name : object, optional
- Name to be stored in the index.
- """
- ),
- }
-)
-
-
-def _get_next_label(label):
- # see test_slice_locs_with_ints_and_floats_succeeds
- dtype = getattr(label, "dtype", type(label))
- if isinstance(label, (Timestamp, Timedelta)):
- dtype = "datetime64[ns]"
- dtype = pandas_dtype(dtype)
-
- if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
- return label + np.timedelta64(1, "ns")
- elif is_integer_dtype(dtype):
- return label + 1
- elif is_float_dtype(dtype):
- return np.nextafter(label, np.inf)
- else:
- raise TypeError(f"cannot determine next label for type {repr(type(label))}")
-
-
-def _get_prev_label(label):
- # see test_slice_locs_with_ints_and_floats_succeeds
- dtype = getattr(label, "dtype", type(label))
- if isinstance(label, (Timestamp, Timedelta)):
- dtype = "datetime64[ns]"
- dtype = pandas_dtype(dtype)
-
- if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
- return label - np.timedelta64(1, "ns")
- elif is_integer_dtype(dtype):
- return label - 1
- elif is_float_dtype(dtype):
- return np.nextafter(label, -np.inf)
- else:
- raise TypeError(f"cannot determine previous label for type {repr(type(label))}")
-
-
-def _new_IntervalIndex(cls, d):
- """
- This is called upon unpickling, rather than the default which doesn't have
- arguments and breaks __new__.
- """
- return cls.from_arrays(**d)
-
-
-@Appender(
- _interval_shared_docs["class"]
- % {
- "klass": "IntervalIndex",
- "summary": "Immutable index of intervals that are closed on the same side.",
- "name": _index_doc_kwargs["name"],
- "extra_attributes": "is_overlapping\nvalues\n",
- "extra_methods": "",
- "examples": textwrap.dedent(
- """\
- Examples
- --------
- A new ``IntervalIndex`` is typically constructed using
- :func:`interval_range`:
-
- >>> pd.interval_range(start=0, end=5)
- IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
- dtype='interval[int64, right]')
-
- It may also be constructed using one of the constructor
- methods: :meth:`IntervalIndex.from_arrays`,
- :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
-
- See further examples in the doc strings of ``interval_range`` and the
- mentioned constructor methods.
- """
- ),
- }
-)
-@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
-@inherit_names(
- [
- "__array__",
- "overlaps",
- "contains",
- "closed_left",
- "closed_right",
- "open_left",
- "open_right",
- "is_empty",
- ],
- IntervalArray,
-)
-@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True)
-class IntervalIndex(ExtensionIndex):
- _typ = "intervalindex"
-
- # annotate properties pinned via inherit_names
- closed: IntervalClosedType
- is_non_overlapping_monotonic: bool
- closed_left: bool
- closed_right: bool
- open_left: bool
- open_right: bool
-
- _data: IntervalArray
- _values: IntervalArray
- _can_hold_strings = False
- _data_cls = IntervalArray
-
- # --------------------------------------------------------------------
- # Constructors
-
- def __new__(
- cls,
- data,
- closed: IntervalClosedType | None = None,
- dtype: Dtype | None = None,
- copy: bool = False,
- name: Hashable | None = None,
- verify_integrity: bool = True,
- ) -> IntervalIndex:
- name = maybe_extract_name(name, data, cls)
-
- with rewrite_exception("IntervalArray", cls.__name__):
- array = IntervalArray(
- data,
- closed=closed,
- copy=copy,
- dtype=dtype,
- verify_integrity=verify_integrity,
- )
-
- return cls._simple_new(array, name)
-
- @classmethod
- @Appender(
- _interval_shared_docs["from_breaks"]
- % {
- "klass": "IntervalIndex",
- "name": textwrap.dedent(
- """
- name : str, optional
- Name of the resulting IntervalIndex."""
- ),
- "examples": textwrap.dedent(
- """\
- Examples
- --------
- >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
- IntervalIndex([(0, 1], (1, 2], (2, 3]],
- dtype='interval[int64, right]')
- """
- ),
- }
- )
- def from_breaks(
- cls,
- breaks,
- closed: IntervalClosedType | None = "right",
- name: Hashable | None = None,
- copy: bool = False,
- dtype: Dtype | None = None,
- ) -> IntervalIndex:
- with rewrite_exception("IntervalArray", cls.__name__):
- array = IntervalArray.from_breaks(
- breaks, closed=closed, copy=copy, dtype=dtype
- )
- return cls._simple_new(array, name=name)
-
- @classmethod
- @Appender(
- _interval_shared_docs["from_arrays"]
- % {
- "klass": "IntervalIndex",
- "name": textwrap.dedent(
- """
- name : str, optional
- Name of the resulting IntervalIndex."""
- ),
- "examples": textwrap.dedent(
- """\
- Examples
- --------
- >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
- IntervalIndex([(0, 1], (1, 2], (2, 3]],
- dtype='interval[int64, right]')
- """
- ),
- }
- )
- def from_arrays(
- cls,
- left,
- right,
- closed: IntervalClosedType = "right",
- name: Hashable | None = None,
- copy: bool = False,
- dtype: Dtype | None = None,
- ) -> IntervalIndex:
- with rewrite_exception("IntervalArray", cls.__name__):
- array = IntervalArray.from_arrays(
- left, right, closed, copy=copy, dtype=dtype
- )
- return cls._simple_new(array, name=name)
-
- @classmethod
- @Appender(
- _interval_shared_docs["from_tuples"]
- % {
- "klass": "IntervalIndex",
- "name": textwrap.dedent(
- """
- name : str, optional
- Name of the resulting IntervalIndex."""
- ),
- "examples": textwrap.dedent(
- """\
- Examples
- --------
- >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
- IntervalIndex([(0, 1], (1, 2]],
- dtype='interval[int64, right]')
- """
- ),
- }
- )
- def from_tuples(
- cls,
- data,
- closed: IntervalClosedType = "right",
- name: Hashable | None = None,
- copy: bool = False,
- dtype: Dtype | None = None,
- ) -> IntervalIndex:
- with rewrite_exception("IntervalArray", cls.__name__):
- arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
- return cls._simple_new(arr, name=name)
-
- # --------------------------------------------------------------------
- # error: Return type "IntervalTree" of "_engine" incompatible with return type
- # "Union[IndexEngine, ExtensionEngine]" in supertype "Index"
- @cache_readonly
- def _engine(self) -> IntervalTree: # type: ignore[override]
- # IntervalTree does not support numpy arrays unless they are 64 bit
- left = self._maybe_convert_i8(self.left)
- left = maybe_upcast_numeric_to_64bit(left)
- right = self._maybe_convert_i8(self.right)
- right = maybe_upcast_numeric_to_64bit(right)
- return IntervalTree(left, right, closed=self.closed)
-
- def __contains__(self, key: Any) -> bool:
- """
- Return a boolean indicating whether this key is IN the index;
- we *only* accept an Interval.
-
- Parameters
- ----------
- key : Interval
-
- Returns
- -------
- bool
- """
- hash(key)
- if not isinstance(key, Interval):
- if is_valid_na_for_dtype(key, self.dtype):
- return self.hasnans
- return False
-
- try:
- self.get_loc(key)
- return True
- except KeyError:
- return False
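A small sketch (editorial, not part of the deleted file) of the membership semantics implemented above; only Interval objects (and NA, when the index has missing values) can test True:

import pandas as pd

idx = pd.IntervalIndex.from_breaks([0, 1, 2])
pd.Interval(0, 1) in idx   # True: exact interval match
0.5 in idx                 # False: scalars are rejected even if covered
idx.get_loc(0.5)           # 0 -- use get_loc()/contains() for point queries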
-
- def _getitem_slice(self, slobj: slice) -> IntervalIndex:
- """
- Fastpath for __getitem__ when we know we have a slice.
- """
- res = self._data[slobj]
- return type(self)._simple_new(res, name=self._name)
-
- @cache_readonly
- def _multiindex(self) -> MultiIndex:
- return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
-
- def __reduce__(self):
- d = {
- "left": self.left,
- "right": self.right,
- "closed": self.closed,
- "name": self.name,
- }
- return _new_IntervalIndex, (type(self), d), None
-
- @property
- def inferred_type(self) -> str:
- """Return a string of the type inferred from the values"""
- return "interval"
-
- # Cannot determine type of "memory_usage"
- @Appender(Index.memory_usage.__doc__) # type: ignore[has-type]
- def memory_usage(self, deep: bool = False) -> int:
- # we don't use an explicit engine
- # so return the bytes here
- return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
-
- # IntervalTree doesn't have a is_monotonic_decreasing, so have to override
- # the Index implementation
- @cache_readonly
- def is_monotonic_decreasing(self) -> bool:
- """
- Return True if the IntervalIndex is monotonic decreasing (only equal or
- decreasing values), else False
- """
- return self[::-1].is_monotonic_increasing
-
- @cache_readonly
- def is_unique(self) -> bool:
- """
- Return True if the IntervalIndex contains unique elements, else False.
- """
- left = self.left
- right = self.right
-
- if self.isna().sum() > 1:
- return False
-
- if left.is_unique or right.is_unique:
- return True
-
- seen_pairs = set()
- check_idx = np.where(left.duplicated(keep=False))[0]
- for idx in check_idx:
- pair = (left[idx], right[idx])
- if pair in seen_pairs:
- return False
- seen_pairs.add(pair)
-
- return True
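A sketch (editorial, not part of the deleted file) of the uniqueness rule encoded above; duplicate (left, right) pairs, or more than one missing value, make the index non-unique:

import pandas as pd

pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]).is_unique          # True
pd.IntervalIndex.from_tuples([(0, 1), (0, 1), (1, 2)]).is_unique  # False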
-
- @property
- def is_overlapping(self) -> bool:
- """
- Return True if the IntervalIndex has overlapping intervals, else False.
-
- Two intervals overlap if they share a common point, including closed
- endpoints. Intervals that only have an open endpoint in common do not
- overlap.
-
- Returns
- -------
- bool
- Boolean indicating if the IntervalIndex has overlapping intervals.
-
- See Also
- --------
- Interval.overlaps : Check whether two Interval objects overlap.
- IntervalIndex.overlaps : Check an IntervalIndex elementwise for
- overlaps.
-
- Examples
- --------
- >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
- >>> index
- IntervalIndex([(0, 2], (1, 3], (4, 5]],
- dtype='interval[int64, right]')
- >>> index.is_overlapping
- True
-
- Intervals that share closed endpoints overlap:
-
- >>> index = pd.interval_range(0, 3, closed='both')
- >>> index
- IntervalIndex([[0, 1], [1, 2], [2, 3]],
- dtype='interval[int64, both]')
- >>> index.is_overlapping
- True
-
- Intervals that only have an open endpoint in common do not overlap:
-
- >>> index = pd.interval_range(0, 3, closed='left')
- >>> index
- IntervalIndex([[0, 1), [1, 2), [2, 3)],
- dtype='interval[int64, left]')
- >>> index.is_overlapping
- False
- """
- # GH 23309
- return self._engine.is_overlapping
-
- def _needs_i8_conversion(self, key) -> bool:
- """
- Check if a given key needs i8 conversion. Conversion is necessary for
- Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
- Interval-like requires conversion if its endpoints are one of the
- aforementioned types.
-
- Assumes that any list-like data has already been cast to an Index.
-
- Parameters
- ----------
- key : scalar or Index-like
- The key that should be checked for i8 conversion
-
- Returns
- -------
- bool
- """
- key_dtype = getattr(key, "dtype", None)
- if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
- return self._needs_i8_conversion(key.left)
-
- i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
- return isinstance(key, i8_types)
-
- def _maybe_convert_i8(self, key):
- """
- Maybe convert a given key to its equivalent i8 value(s). Used as a
- preprocessing step prior to IntervalTree queries (self._engine), which
- expects numeric data.
-
- Parameters
- ----------
- key : scalar or list-like
- The key that should maybe be converted to i8.
-
- Returns
- -------
- scalar or list-like
- The original key if no conversion occurred, int if converted scalar,
- Index with an int64 dtype if converted list-like.
- """
- if is_list_like(key):
- key = ensure_index(key)
- key = maybe_upcast_numeric_to_64bit(key)
-
- if not self._needs_i8_conversion(key):
- return key
-
- scalar = is_scalar(key)
- key_dtype = getattr(key, "dtype", None)
- if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
- # convert left/right and reconstruct
- left = self._maybe_convert_i8(key.left)
- right = self._maybe_convert_i8(key.right)
- constructor = Interval if scalar else IntervalIndex.from_arrays
- # error: "object" not callable
- return constructor(
- left, right, closed=self.closed
- ) # type: ignore[operator]
-
- if scalar:
- # Timestamp/Timedelta
- key_dtype, key_i8 = infer_dtype_from_scalar(key)
- if lib.is_period(key):
- key_i8 = key.ordinal
- elif isinstance(key_i8, Timestamp):
- key_i8 = key_i8._value
- elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
- key_i8 = key_i8.view("i8")
- else:
- # DatetimeIndex/TimedeltaIndex
- key_dtype, key_i8 = key.dtype, Index(key.asi8)
- if key.hasnans:
- # convert NaT from its i8 value to np.nan so it's not viewed
- # as a valid value, maybe causing errors (e.g. is_overlapping)
- key_i8 = key_i8.where(~key._isnan)
-
- # ensure consistency with IntervalIndex subtype
- # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
- # ExtensionDtype]" has no attribute "subtype"
- subtype = self.dtype.subtype # type: ignore[union-attr]
-
- if subtype != key_dtype:
- raise ValueError(
- f"Cannot index an IntervalIndex of subtype {subtype} with "
- f"values of dtype {key_dtype}"
- )
-
- return key_i8
-
- def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
- if not self.is_non_overlapping_monotonic:
- raise KeyError(
- "can only get slices from an IntervalIndex if bounds are "
- "non-overlapping and all monotonic increasing or decreasing"
- )
-
- if isinstance(label, (IntervalMixin, IntervalIndex)):
- raise NotImplementedError("Interval objects are not currently supported")
-
- # GH 20921: "not is_monotonic_increasing" for the second condition
- # instead of "is_monotonic_decreasing" to account for single element
- # indexes being both increasing and decreasing
- if (side == "left" and self.left.is_monotonic_increasing) or (
- side == "right" and not self.left.is_monotonic_increasing
- ):
- sub_idx = self.right
- if self.open_right:
- label = _get_next_label(label)
- else:
- sub_idx = self.left
- if self.open_left:
- label = _get_prev_label(label)
-
- return sub_idx._searchsorted_monotonic(label, side)
-
- # --------------------------------------------------------------------
- # Indexing Methods
-
- def get_loc(self, key) -> int | slice | np.ndarray:
- """
- Get integer location, slice or boolean mask for requested label.
-
- Parameters
- ----------
- key : label
-
- Returns
- -------
- int if unique index, slice if monotonic index, else mask
-
- Examples
- --------
- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
- >>> index = pd.IntervalIndex([i1, i2])
- >>> index.get_loc(1)
- 0
-
- You can also supply a point inside an interval.
-
- >>> index.get_loc(1.5)
- 1
-
- If a label is in several intervals, you get the locations of all the
- relevant intervals.
-
- >>> i3 = pd.Interval(0, 2)
- >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
- >>> overlapping_index.get_loc(0.5)
- array([ True, False, True])
-
- Only exact matches will be returned if an interval is provided.
-
- >>> index.get_loc(pd.Interval(0, 1))
- 0
- """
- self._check_indexing_error(key)
-
- if isinstance(key, Interval):
- if self.closed != key.closed:
- raise KeyError(key)
- mask = (self.left == key.left) & (self.right == key.right)
- elif is_valid_na_for_dtype(key, self.dtype):
- mask = self.isna()
- else:
- # assume scalar
- op_left = le if self.closed_left else lt
- op_right = le if self.closed_right else lt
- try:
- mask = op_left(self.left, key) & op_right(key, self.right)
- except TypeError as err:
- # scalar is not comparable to II subtype --> invalid label
- raise KeyError(key) from err
-
- matches = mask.sum()
- if matches == 0:
- raise KeyError(key)
- if matches == 1:
- return mask.argmax()
-
- res = lib.maybe_booleans_to_slice(mask.view("u1"))
- if isinstance(res, slice) and res.stop is None:
- # TODO: DO this in maybe_booleans_to_slice?
- res = slice(res.start, len(self), res.step)
- return res
-
- def _get_indexer(
- self,
- target: Index,
- method: str | None = None,
- limit: int | None = None,
- tolerance: Any | None = None,
- ) -> npt.NDArray[np.intp]:
- if isinstance(target, IntervalIndex):
- # We only get here with not self.is_overlapping
- # -> at most one match per interval in target
- # want exact matches -> need both left/right to match, so defer to
- # left/right get_indexer, compare elementwise, equality -> match
- indexer = self._get_indexer_unique_sides(target)
-
- elif not is_object_dtype(target.dtype):
- # homogeneous scalar index: use IntervalTree
- # we should always have self._should_partial_index(target) here
- target = self._maybe_convert_i8(target)
- indexer = self._engine.get_indexer(target.values)
- else:
- # heterogeneous scalar index: defer elementwise to get_loc
- # we should always have self._should_partial_index(target) here
- return self._get_indexer_pointwise(target)[0]
-
- return ensure_platform_int(indexer)
-
- @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
- def get_indexer_non_unique(
- self, target: Index
- ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
- target = ensure_index(target)
-
- if not self._should_compare(target) and not self._should_partial_index(target):
- # e.g. IntervalIndex with different closed or incompatible subtype
- # -> no matches
- return self._get_indexer_non_comparable(target, None, unique=False)
-
- elif isinstance(target, IntervalIndex):
- if self.left.is_unique and self.right.is_unique:
- # fastpath available even if we don't have self._index_as_unique
- indexer = self._get_indexer_unique_sides(target)
- missing = (indexer == -1).nonzero()[0]
- else:
- return self._get_indexer_pointwise(target)
-
- elif is_object_dtype(target.dtype) or not self._should_partial_index(target):
- # target might contain intervals: defer elementwise to get_loc
- return self._get_indexer_pointwise(target)
-
- else:
- # Note: this case behaves differently from other Index subclasses
- # because IntervalIndex does partial-int indexing
- target = self._maybe_convert_i8(target)
- indexer, missing = self._engine.get_indexer_non_unique(target.values)
-
- return ensure_platform_int(indexer), ensure_platform_int(missing)
-
- def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:
- """
- _get_indexer specialized to the case where both of our sides are unique.
- """
- # Caller is responsible for checking
- # `self.left.is_unique and self.right.is_unique`
-
- left_indexer = self.left.get_indexer(target.left)
- right_indexer = self.right.get_indexer(target.right)
- indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
- return indexer
-
- def _get_indexer_pointwise(
- self, target: Index
- ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
- """
- pointwise implementation for get_indexer and get_indexer_non_unique.
- """
- indexer, missing = [], []
- for i, key in enumerate(target):
- try:
- locs = self.get_loc(key)
- if isinstance(locs, slice):
- # Only needed for get_indexer_non_unique
- locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
- elif lib.is_integer(locs):
- locs = np.array(locs, ndmin=1)
- else:
- # otherwise we have ndarray[bool]
- locs = np.where(locs)[0]
- except KeyError:
- missing.append(i)
- locs = np.array([-1])
- except InvalidIndexError:
- # i.e. non-scalar key e.g. a tuple.
- # see test_append_different_columns_types_raises
- missing.append(i)
- locs = np.array([-1])
-
- indexer.append(locs)
-
- indexer = np.concatenate(indexer)
- return ensure_platform_int(indexer), ensure_platform_int(missing)
-
- @cache_readonly
- def _index_as_unique(self) -> bool:
- return not self.is_overlapping and self._engine._na_count < 2
-
- _requires_unique_msg = (
- "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"
- )
-
- def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
- if not (key.step is None or key.step == 1):
- # GH#31658 if label-based, we require step == 1,
- # if positional, we disallow float start/stop
- msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
- if kind == "loc":
- raise ValueError(msg)
- if kind == "getitem":
- if not is_valid_positional_slice(key):
- # i.e. this cannot be interpreted as a positional slice
- raise ValueError(msg)
-
- return super()._convert_slice_indexer(key, kind)
-
- @cache_readonly
- def _should_fallback_to_positional(self) -> bool:
- # integer lookups in Series.__getitem__ are unambiguously
- # positional in this case
- # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
- # ExtensionDtype]" has no attribute "subtype"
- return self.dtype.subtype.kind in "mM" # type: ignore[union-attr]
-
- def _maybe_cast_slice_bound(self, label, side: str):
- return getattr(self, side)._maybe_cast_slice_bound(label, side)
-
- def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
- if not isinstance(dtype, IntervalDtype):
- return False
- common_subtype = find_common_type([self.dtype, dtype])
- return not is_object_dtype(common_subtype)
-
- # --------------------------------------------------------------------
-
- @cache_readonly
- def left(self) -> Index:
- return Index(self._data.left, copy=False)
-
- @cache_readonly
- def right(self) -> Index:
- return Index(self._data.right, copy=False)
-
- @cache_readonly
- def mid(self) -> Index:
- return Index(self._data.mid, copy=False)
-
- @property
- def length(self) -> Index:
- return Index(self._data.length, copy=False)
-
- # --------------------------------------------------------------------
- # Rendering Methods
- # __repr__ associated methods are based on MultiIndex
-
- def _format_with_header(self, header: list[str], na_rep: str) -> list[str]:
- # matches base class except for whitespace padding
- return header + list(self._format_native_types(na_rep=na_rep))
-
- def _format_native_types(
- self, *, na_rep: str = "NaN", quoting=None, **kwargs
- ) -> npt.NDArray[np.object_]:
- # GH 28210: use base method but with different default na_rep
- return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
-
- def _format_data(self, name=None) -> str:
- # TODO: integrate with categorical and make generic
- # name argument is unused here; just for compat with base / categorical
- return f"{self._data._format_data()},{self._format_space()}"
-
- # --------------------------------------------------------------------
- # Set Operations
-
- def _intersection(self, other, sort):
- """
- intersection specialized to the case with matching dtypes.
- """
- # For IntervalIndex we also know other.closed == self.closed
- if self.left.is_unique and self.right.is_unique:
- taken = self._intersection_unique(other)
- elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
- # Swap other/self if other is unique and self does not have
- # multiple NaNs
- taken = other._intersection_unique(self)
- else:
- # duplicates
- taken = self._intersection_non_unique(other)
-
- if sort is None:
- taken = taken.sort_values()
-
- return taken
-
- def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
- """
- Used when the IntervalIndex has no duplicate endpoints, on either the
- left or the right side.
- Return the intersection with another IntervalIndex.
-
- Parameters
- ----------
- other : IntervalIndex
-
- Returns
- -------
- IntervalIndex
- """
- # Note: this is much more performant than super()._intersection(other)
- lindexer = self.left.get_indexer(other.left)
- rindexer = self.right.get_indexer(other.right)
-
- match = (lindexer == rindexer) & (lindexer != -1)
- indexer = lindexer.take(match.nonzero()[0])
- indexer = unique(indexer)
-
- return self.take(indexer)
-
- def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
- """
- Used when the IntervalIndex has duplicate endpoints, on either the
- left or the right side.
- Return the intersection with another IntervalIndex.
-
- Parameters
- ----------
- other : IntervalIndex
-
- Returns
- -------
- IntervalIndex
- """
- # Note: this is about 3.25x faster than super()._intersection(other)
- # in IntervalIndexMethod.time_intersection_both_duplicate(1000)
- mask = np.zeros(len(self), dtype=bool)
-
- if self.hasnans and other.hasnans:
- first_nan_loc = np.arange(len(self))[self.isna()][0]
- mask[first_nan_loc] = True
-
- other_tups = set(zip(other.left, other.right))
- for i, tup in enumerate(zip(self.left, self.right)):
- if tup in other_tups:
- mask[i] = True
-
- return self[mask]
-
- # --------------------------------------------------------------------
-
- def _get_engine_target(self) -> np.ndarray:
- # Note: we _could_ use libjoin functions by either casting to object
- # dtype or constructing tuples (faster than constructing Intervals)
- # but the libjoin fastpaths are no longer fast in these cases.
- raise NotImplementedError(
- "IntervalIndex does not use libjoin fastpaths or pass values to "
- "IndexEngine objects"
- )
-
- def _from_join_target(self, result):
- raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
-
- # TODO: arithmetic operations
-
-
-def _is_valid_endpoint(endpoint) -> bool:
- """
- Helper for interval_range to check if start/end are valid types.
- """
- return any(
- [
- is_number(endpoint),
- isinstance(endpoint, Timestamp),
- isinstance(endpoint, Timedelta),
- endpoint is None,
- ]
- )
-
-
-def _is_type_compatible(a, b) -> bool:
- """
- Helper for interval_range to check type compat of start/end/freq.
- """
- is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
- is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
- return (
- (is_number(a) and is_number(b))
- or (is_ts_compat(a) and is_ts_compat(b))
- or (is_td_compat(a) and is_td_compat(b))
- or com.any_none(a, b)
- )
-
-
-def interval_range(
- start=None,
- end=None,
- periods=None,
- freq=None,
- name: Hashable | None = None,
- closed: IntervalClosedType = "right",
-) -> IntervalIndex:
- """
- Return a fixed frequency IntervalIndex.
-
- Parameters
- ----------
- start : numeric or datetime-like, default None
- Left bound for generating intervals.
- end : numeric or datetime-like, default None
- Right bound for generating intervals.
- periods : int, default None
- Number of periods to generate.
- freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
- The length of each interval. Must be consistent with the type of start
- and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
- for numeric and 'D' for datetime-like.
- name : str, default None
- Name of the resulting IntervalIndex.
- closed : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the intervals are closed on the left-side, right-side, both
- or neither.
-
- Returns
- -------
- IntervalIndex
-
- See Also
- --------
- IntervalIndex : An Index of intervals that are all closed on the same side.
-
- Notes
- -----
- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
- exactly three must be specified. If ``freq`` is omitted, the resulting
- ``IntervalIndex`` will have ``periods`` linearly spaced elements between
- ``start`` and ``end``, inclusively.
-
- To learn more about datetime-like frequency strings, please see `this link
- <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
-
- Examples
- --------
- Numeric ``start`` and ``end`` is supported.
-
- >>> pd.interval_range(start=0, end=5)
- IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
- dtype='interval[int64, right]')
-
- Additionally, datetime-like input is also supported.
-
- >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
- ... end=pd.Timestamp('2017-01-04'))
- IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
- (2017-01-03, 2017-01-04]],
- dtype='interval[datetime64[ns], right]')
-
- The ``freq`` parameter specifies the frequency between the left and right
- endpoints of the individual intervals within the ``IntervalIndex``. For
- numeric ``start`` and ``end``, the frequency must also be numeric.
-
- >>> pd.interval_range(start=0, periods=4, freq=1.5)
- IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
- dtype='interval[float64, right]')
-
- Similarly, for datetime-like ``start`` and ``end``, the frequency must be
- convertible to a DateOffset.
-
- >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
- ... periods=3, freq='MS')
- IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
- (2017-03-01, 2017-04-01]],
- dtype='interval[datetime64[ns], right]')
-
- Specify ``start``, ``end``, and ``periods``; the frequency is generated
- automatically (linearly spaced).
-
- >>> pd.interval_range(start=0, end=6, periods=4)
- IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
- dtype='interval[float64, right]')
-
- The ``closed`` parameter specifies which endpoints of the individual
- intervals within the ``IntervalIndex`` are closed.
-
- >>> pd.interval_range(end=5, periods=4, closed='both')
- IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
- dtype='interval[int64, both]')
- """
- start = maybe_box_datetimelike(start)
- end = maybe_box_datetimelike(end)
- endpoint = start if start is not None else end
-
- if freq is None and com.any_none(periods, start, end):
- freq = 1 if is_number(endpoint) else "D"
-
- if com.count_not_none(start, end, periods, freq) != 3:
- raise ValueError(
- "Of the four parameters: start, end, periods, and "
- "freq, exactly three must be specified"
- )
-
- if not _is_valid_endpoint(start):
- raise ValueError(f"start must be numeric or datetime-like, got {start}")
- if not _is_valid_endpoint(end):
- raise ValueError(f"end must be numeric or datetime-like, got {end}")
-
- if is_float(periods):
- periods = int(periods)
- elif not is_integer(periods) and periods is not None:
- raise TypeError(f"periods must be a number, got {periods}")
-
- if freq is not None and not is_number(freq):
- try:
- freq = to_offset(freq)
- except ValueError as err:
- raise ValueError(
- f"freq must be numeric or convertible to DateOffset, got {freq}"
- ) from err
-
- # verify type compatibility
- if not all(
- [
- _is_type_compatible(start, end),
- _is_type_compatible(start, freq),
- _is_type_compatible(end, freq),
- ]
- ):
- raise TypeError("start, end, freq need to be type compatible")
-
- # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
- if periods is not None:
- periods += 1
-
- breaks: np.ndarray | TimedeltaIndex | DatetimeIndex
-
- if is_number(endpoint):
- if com.all_not_none(start, end, freq):
- # 0.1 ensures we capture end
- breaks = np.arange(start, end + (freq * 0.1), freq)
- else:
- # compute the period/start/end if unspecified (at most one)
- if periods is None:
- periods = int((end - start) // freq) + 1
- elif start is None:
- start = end - (periods - 1) * freq
- elif end is None:
- end = start + (periods - 1) * freq
-
- breaks = np.linspace(start, end, periods)
- if all(is_integer(x) for x in com.not_none(start, end, freq)):
- # np.linspace always produces float output
-
- # error: Argument 1 to "maybe_downcast_numeric" has incompatible type
- # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
- # expected "ndarray[Any, Any]" [
- breaks = maybe_downcast_numeric(
- breaks, # type: ignore[arg-type]
- np.dtype("int64"),
- )
- else:
- # delegate to the appropriate range function
- if isinstance(endpoint, Timestamp):
- breaks = date_range(start=start, end=end, periods=periods, freq=freq)
- else:
- breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)
-
- return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
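For review context, a minimal sketch of the behavior the deleted interval.py implements (assuming a stock pandas install; the variable names below are illustrative and not part of the removed file):

    import pandas as pd

    # Four right-closed intervals linearly spaced between 0 and 6.
    idx = pd.interval_range(start=0, end=6, periods=4)

    assert idx.get_loc(1.0) == 0     # 1.0 falls inside (0.0, 1.5]
    assert idx.get_loc(1.5) == 0     # the closed right endpoint also matches
    assert not idx.is_overlapping    # shared endpoints are open on one side

Because this index is non-overlapping with unique sides, get_loc returns a single integer; for an overlapping index it returns a boolean mask instead, as the docstring examples above show.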
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py
deleted file mode 100644
index ace78d77350cbdc4ca3aa837720767a965443051..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas import (
- CategoricalIndex,
- DatetimeIndex,
- Index,
- PeriodIndex,
- TimedeltaIndex,
- isna,
-)
-import pandas._testing as tm
-from pandas.api.types import (
- is_complex_dtype,
- is_numeric_dtype,
-)
-from pandas.core.arrays import BooleanArray
-from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
-
-
-def test_numpy_ufuncs_out(index):
- result = index == index
-
- out = np.empty(index.shape, dtype=bool)
- np.equal(index, index, out=out)
- tm.assert_numpy_array_equal(out, result)
-
- if not index._is_multi:
- # same thing on the ExtensionArray
- out = np.empty(index.shape, dtype=bool)
- np.equal(index.array, index.array, out=out)
- tm.assert_numpy_array_equal(out, result)
-
-
-@pytest.mark.parametrize(
- "func",
- [
- np.exp,
- np.exp2,
- np.expm1,
- np.log,
- np.log2,
- np.log10,
- np.log1p,
- np.sqrt,
- np.sin,
- np.cos,
- np.tan,
- np.arcsin,
- np.arccos,
- np.arctan,
- np.sinh,
- np.cosh,
- np.tanh,
- np.arcsinh,
- np.arccosh,
- np.arctanh,
- np.deg2rad,
- np.rad2deg,
- ],
- ids=lambda x: x.__name__,
-)
-def test_numpy_ufuncs_basic(index, func):
- # test ufuncs of numpy, see:
- # https://numpy.org/doc/stable/reference/ufuncs.html
-
- if isinstance(index, DatetimeIndexOpsMixin):
- with tm.external_error_raised((TypeError, AttributeError)):
- with np.errstate(all="ignore"):
- func(index)
- elif is_numeric_dtype(index) and not (
- is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg]
- ):
- # coerces to float (e.g. np.sin)
- with np.errstate(all="ignore"):
- result = func(index)
- arr_result = func(index.values)
- if arr_result.dtype == np.float16:
- arr_result = arr_result.astype(np.float32)
- exp = Index(arr_result, name=index.name)
-
- tm.assert_index_equal(result, exp)
- if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index):
- if is_complex_dtype(index):
- assert result.dtype == index.dtype
- elif index.dtype in ["bool", "int8", "uint8"]:
- assert result.dtype in ["float16", "float32"]
- elif index.dtype in ["int16", "uint16", "float32"]:
- assert result.dtype == "float32"
- else:
- assert result.dtype == "float64"
- else:
- # e.g. np.exp with Int64 -> Float64
- assert type(result) is Index
- # raise AttributeError or TypeError
- elif len(index) == 0:
- pass
- else:
- with tm.external_error_raised((TypeError, AttributeError)):
- with np.errstate(all="ignore"):
- func(index)
-
-
-@pytest.mark.parametrize(
- "func", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__
-)
-def test_numpy_ufuncs_other(index, func):
- # test ufuncs of numpy, see:
- # https://numpy.org/doc/stable/reference/ufuncs.html
- if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
- if func in (np.isfinite, np.isinf, np.isnan):
- # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
- result = func(index)
- assert isinstance(result, np.ndarray)
-
- out = np.empty(index.shape, dtype=bool)
- func(index, out=out)
- tm.assert_numpy_array_equal(out, result)
- else:
- with tm.external_error_raised(TypeError):
- func(index)
-
- elif isinstance(index, PeriodIndex):
- with tm.external_error_raised(TypeError):
- func(index)
-
- elif is_numeric_dtype(index) and not (
- is_complex_dtype(index) and func is np.signbit
- ):
- # Results in bool array
- result = func(index)
- if not isinstance(index.dtype, np.dtype):
- # e.g. Int64 we expect to get BooleanArray back
- assert isinstance(result, BooleanArray)
- else:
- assert isinstance(result, np.ndarray)
-
- out = np.empty(index.shape, dtype=bool)
- func(index, out=out)
-
- if not isinstance(index.dtype, np.dtype):
- tm.assert_numpy_array_equal(out, result._data)
- else:
- tm.assert_numpy_array_equal(out, result)
-
- elif len(index) == 0:
- pass
- else:
- with tm.external_error_raised(TypeError):
- func(index)
-
-
-@pytest.mark.parametrize("func", [np.maximum, np.minimum])
-def test_numpy_ufuncs_reductions(index, func, request):
- # TODO: overlap with tests.series.test_ufunc.test_reductions
- if len(index) == 0:
- pytest.skip("Test doesn't make sense for empty index.")
-
- if isinstance(index, CategoricalIndex) and index.dtype.ordered is False:
- with pytest.raises(TypeError, match="is not ordered for"):
- func.reduce(index)
- return
- else:
- result = func.reduce(index)
-
- if func is np.maximum:
- expected = index.max(skipna=False)
- else:
- expected = index.min(skipna=False)
- # TODO: do we have cases both with and without NAs?
-
- assert type(result) is type(expected)
- if isna(result):
- assert isna(expected)
- else:
- assert result == expected
-
-
-@pytest.mark.parametrize("func", [np.bitwise_and, np.bitwise_or, np.bitwise_xor])
-def test_numpy_ufuncs_bitwise(func):
- # https://github.com/pandas-dev/pandas/issues/46769
- idx1 = Index([1, 2, 3, 4], dtype="int64")
- idx2 = Index([3, 4, 5, 6], dtype="int64")
-
- with tm.assert_produces_warning(None):
- result = func(idx1, idx2)
-
- expected = Index(func(idx1.values, idx2.values))
- tm.assert_index_equal(result, expected)
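For reference, a short sketch of the numpy-compat behavior these deleted tests exercised (assuming a stock numpy/pandas environment; not part of the removed file):

    import numpy as np
    import pandas as pd

    idx = pd.Index([1.0, 4.0, 9.0])
    result = np.sqrt(idx)                  # unary ufuncs round-trip through Index
    assert isinstance(result, pd.Index)
    assert result.tolist() == [1.0, 2.0, 3.0]

    left = pd.Index([1, 2, 3, 4], dtype="int64")
    right = pd.Index([3, 4, 5, 6], dtype="int64")
    assert np.bitwise_and(left, right).tolist() == [1, 0, 1, 4]   # GH#46769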
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py
deleted file mode 100644
index f49af73f9f4997840b9ded937fb4687a4b9594ca..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import pytest
-
-from pandas.errors import NullFrequencyError
-
-import pandas as pd
-from pandas import TimedeltaIndex
-import pandas._testing as tm
-
-
-class TestTimedeltaIndexShift:
- # -------------------------------------------------------------
- # TimedeltaIndex.shift is used by __add__/__sub__
-
- def test_tdi_shift_empty(self):
- # GH#9903
- idx = TimedeltaIndex([], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="H"), idx)
- tm.assert_index_equal(idx.shift(3, freq="H"), idx)
-
- def test_tdi_shift_hours(self):
- # GH#9903
- idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="H"), idx)
- exp = TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(3, freq="H"), exp)
- exp = TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
-
- def test_tdi_shift_minutes(self):
- # GH#9903
- idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
- tm.assert_index_equal(idx.shift(0, freq="T"), idx)
- exp = TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
- tm.assert_index_equal(idx.shift(3, freq="T"), exp)
- exp = TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
- tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
-
- def test_tdi_shift_int(self):
- # GH#8083
- tdi = pd.to_timedelta(range(5), unit="d")
- trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
- result = trange.shift(1)
- expected = TimedeltaIndex(
- [
- "1 days 01:00:00",
- "2 days 01:00:00",
- "3 days 01:00:00",
- "4 days 01:00:00",
- "5 days 01:00:00",
- ],
- freq="D",
- )
- tm.assert_index_equal(result, expected)
-
- def test_tdi_shift_nonstandard_freq(self):
- # GH#8083
- tdi = pd.to_timedelta(range(5), unit="d")
- trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
- result = trange.shift(3, freq="2D 1s")
- expected = TimedeltaIndex(
- [
- "6 days 01:00:03",
- "7 days 01:00:03",
- "8 days 01:00:03",
- "9 days 01:00:03",
- "10 days 01:00:03",
- ],
- freq="D",
- )
- tm.assert_index_equal(result, expected)
-
- def test_shift_no_freq(self):
- # GH#19147
- tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
- with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
- tdi.shift(2)
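A brief sketch of the TimedeltaIndex.shift semantics covered by the deleted tests above (illustrative only; assumes a stock pandas install):

    import pandas as pd

    tdi = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"])

    # shift(n, freq) adds n * freq to every element.
    shifted = tdi.shift(3, freq="H")
    assert list(shifted) == list(pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"]))

    # Without an explicit or inferable freq, shift(n) raises NullFrequencyError.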
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_dataframe.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_dataframe.py
deleted file mode 100644
index 105ffe84a07038983c1c61a5c01e14626bc9ed6e..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_dataframe.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import numpy as np
-import pytest
-
-import pandas as pd
-from pandas import (
- DataFrame,
- Index,
- Series,
- concat,
-)
-import pandas._testing as tm
-
-
-class TestDataFrameConcat:
- def test_concat_multiple_frames_dtypes(self):
- # GH#2759
- df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
- df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
- results = concat((df1, df2), axis=1).dtypes
- expected = Series(
- [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
- index=["foo", "bar", 0, 1],
- )
- tm.assert_series_equal(results, expected)
-
- def test_concat_tuple_keys(self):
- # GH#14438
- df1 = DataFrame(np.ones((2, 2)), columns=list("AB"))
- df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
- results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
- expected = DataFrame(
- {
- "A": {
- ("bee", "bah", 0): 1.0,
- ("bee", "bah", 1): 1.0,
- ("bee", "boo", 0): 2.0,
- ("bee", "boo", 1): 2.0,
- ("bee", "boo", 2): 2.0,
- },
- "B": {
- ("bee", "bah", 0): 1.0,
- ("bee", "bah", 1): 1.0,
- ("bee", "boo", 0): 2.0,
- ("bee", "boo", 1): 2.0,
- ("bee", "boo", 2): 2.0,
- },
- }
- )
- tm.assert_frame_equal(results, expected)
-
- def test_concat_named_keys(self):
- # GH#14252
- df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
- index = Index(["a", "b"], name="baz")
- concatted_named_from_keys = concat([df, df], keys=index)
- expected_named = DataFrame(
- {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
- index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
- )
- tm.assert_frame_equal(concatted_named_from_keys, expected_named)
-
- index_no_name = Index(["a", "b"], name=None)
- concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"])
- tm.assert_frame_equal(concatted_named_from_names, expected_named)
-
- concatted_unnamed = concat([df, df], keys=index_no_name)
- expected_unnamed = DataFrame(
- {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
- index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
- )
- tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
-
- def test_concat_axis_parameter(self):
- # GH#14369
- df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2))
- df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2))
-
- # Index/row/0 DataFrame
- expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
-
- concatted_index = concat([df1, df2], axis="index")
- tm.assert_frame_equal(concatted_index, expected_index)
-
- concatted_row = concat([df1, df2], axis="rows")
- tm.assert_frame_equal(concatted_row, expected_index)
-
- concatted_0 = concat([df1, df2], axis=0)
- tm.assert_frame_equal(concatted_0, expected_index)
-
- # Columns/1 DataFrame
- expected_columns = DataFrame(
- [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
- )
-
- concatted_columns = concat([df1, df2], axis="columns")
- tm.assert_frame_equal(concatted_columns, expected_columns)
-
- concatted_1 = concat([df1, df2], axis=1)
- tm.assert_frame_equal(concatted_1, expected_columns)
-
- series1 = Series([0.1, 0.2])
- series2 = Series([0.3, 0.4])
-
- # Index/row/0 Series
- expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
-
- concatted_index_series = concat([series1, series2], axis="index")
- tm.assert_series_equal(concatted_index_series, expected_index_series)
-
- concatted_row_series = concat([series1, series2], axis="rows")
- tm.assert_series_equal(concatted_row_series, expected_index_series)
-
- concatted_0_series = concat([series1, series2], axis=0)
- tm.assert_series_equal(concatted_0_series, expected_index_series)
-
- # Columns/1 Series
- expected_columns_series = DataFrame(
- [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
- )
-
- concatted_columns_series = concat([series1, series2], axis="columns")
- tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
-
- concatted_1_series = concat([series1, series2], axis=1)
- tm.assert_frame_equal(concatted_1_series, expected_columns_series)
-
- # Testing ValueError
- with pytest.raises(ValueError, match="No axis named"):
- concat([series1, series2], axis="something")
-
- def test_concat_numerical_names(self):
- # GH#15262, GH#12223
- df = DataFrame(
- {"col": range(9)},
- dtype="int32",
- index=(
- pd.MultiIndex.from_product(
- [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
- )
- ),
- )
- result = concat((df.iloc[:2, :], df.iloc[-2:, :]))
- expected = DataFrame(
- {"col": [0, 1, 7, 8]},
- dtype="int32",
- index=pd.MultiIndex.from_tuples(
- [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
- ),
- )
- tm.assert_frame_equal(result, expected)
-
- def test_concat_astype_dup_col(self):
- # GH#23049
- df = DataFrame([{"a": "b"}])
- df = concat([df, df], axis=1)
-
- result = df.astype("category")
- expected = DataFrame(
- np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
- ).astype("category")
- tm.assert_frame_equal(result, expected)
-
- def test_concat_dataframe_keys_bug(self, sort):
- t1 = DataFrame(
- {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
- )
- t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
-
- # it works
- result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
- assert list(result.columns) == [("t1", "value"), ("t2", "value")]
-
- def test_concat_bool_with_int(self):
- # GH#42092 we may want to change this to return object, but that
- # would need a deprecation
- df1 = DataFrame(Series([True, False, True, True], dtype="bool"))
- df2 = DataFrame(Series([1, 0, 1], dtype="int64"))
-
- result = concat([df1, df2])
- expected = concat([df1.astype("int64"), df2])
- tm.assert_frame_equal(result, expected)
-
- def test_concat_duplicates_in_index_with_keys(self):
- # GH#42651
- index = [1, 1, 3]
- data = [1, 2, 3]
-
- df = DataFrame(data=data, index=index)
- result = concat([df], keys=["A"], names=["ID", "date"])
- mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"])
- expected = DataFrame(data=data, index=mi)
- tm.assert_frame_equal(result, expected)
- tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date"))
-
- @pytest.mark.parametrize("ignore_index", [True, False])
- @pytest.mark.parametrize("order", ["C", "F"])
- @pytest.mark.parametrize("axis", [0, 1])
- def test_concat_copies(self, axis, order, ignore_index, using_copy_on_write):
- # based on asv ConcatDataFrames
- df = DataFrame(np.zeros((10000, 200), dtype=np.float32, order=order))
-
- res = concat([df] * 5, axis=axis, ignore_index=ignore_index, copy=True)
-
- if not using_copy_on_write:
- for arr in res._iter_column_arrays():
- for arr2 in df._iter_column_arrays():
- assert not np.shares_memory(arr, arr2)
-
- def test_outer_sort_columns(self):
- # GH#47127
- df1 = DataFrame({"A": [0], "B": [1], 0: 1})
- df2 = DataFrame({"A": [100]})
- result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
- expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]})
- tm.assert_frame_equal(result, expected)
-
- def test_inner_sort_columns(self):
- # GH#47127
- df1 = DataFrame({"A": [0], "B": [1], 0: 1})
- df2 = DataFrame({"A": [100], 0: 2})
- result = concat([df1, df2], ignore_index=True, join="inner", sort=True)
- expected = DataFrame({0: [1, 2], "A": [0, 100]})
- tm.assert_frame_equal(result, expected)
-
- def test_sort_columns_one_df(self):
- # GH#47127
- df1 = DataFrame({"A": [100], 0: 2})
- result = concat([df1], ignore_index=True, join="inner", sort=True)
- expected = DataFrame({0: [2], "A": [100]})
- tm.assert_frame_equal(result, expected)
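For review context, a compact sketch of the concat behavior the deleted DataFrame tests covered (assuming a stock pandas install; names are illustrative):

    import pandas as pd

    df1 = pd.DataFrame({"A": [0.1, 0.2]})
    df2 = pd.DataFrame({"A": [0.3, 0.4]})

    # axis="index", axis="rows", and axis=0 all stack vertically and keep row labels.
    stacked = pd.concat([df1, df2], axis="index")
    assert stacked["A"].tolist() == [0.1, 0.2, 0.3, 0.4]
    assert stacked.index.tolist() == [0, 1, 0, 1]

    # keys= prepends an outer level, producing a MultiIndex.
    keyed = pd.concat([df1, df2], keys=["t1", "t2"])
    assert keyed.index[0] == ("t1", 0)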
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_optional_dependency.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_optional_dependency.py
deleted file mode 100644
index c1d1948d6c31acacc2ff965cd41c2d1a799274ed..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_optional_dependency.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import sys
-import types
-
-import pytest
-
-from pandas.compat._optional import (
- VERSIONS,
- import_optional_dependency,
-)
-
-import pandas._testing as tm
-
-
-def test_import_optional():
- match = "Missing .*notapackage.* pip .* conda .* notapackage"
- with pytest.raises(ImportError, match=match) as exc_info:
- import_optional_dependency("notapackage")
- # The original exception should be there as context:
- assert isinstance(exc_info.value.__context__, ImportError)
-
- result = import_optional_dependency("notapackage", errors="ignore")
- assert result is None
-
-
-def test_xlrd_version_fallback():
- pytest.importorskip("xlrd")
- import_optional_dependency("xlrd")
-
-
-def test_bad_version(monkeypatch):
- name = "fakemodule"
- module = types.ModuleType(name)
- module.__version__ = "0.9.0"
- sys.modules[name] = module
- monkeypatch.setitem(VERSIONS, name, "1.0.0")
-
- match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
- with pytest.raises(ImportError, match=match):
- import_optional_dependency("fakemodule")
-
- # Test min_version parameter
- result = import_optional_dependency("fakemodule", min_version="0.8")
- assert result is module
-
- with tm.assert_produces_warning(UserWarning):
- result = import_optional_dependency("fakemodule", errors="warn")
- assert result is None
-
- module.__version__ = "1.0.0" # exact match is OK
- result = import_optional_dependency("fakemodule")
- assert result is module
-
-
-def test_submodule(monkeypatch):
- # Create a fake module with a submodule
- name = "fakemodule"
- module = types.ModuleType(name)
- module.__version__ = "0.9.0"
- sys.modules[name] = module
- sub_name = "submodule"
- submodule = types.ModuleType(sub_name)
- setattr(module, sub_name, submodule)
- sys.modules[f"{name}.{sub_name}"] = submodule
- monkeypatch.setitem(VERSIONS, name, "1.0.0")
-
- match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
- with pytest.raises(ImportError, match=match):
- import_optional_dependency("fakemodule.submodule")
-
- with tm.assert_produces_warning(UserWarning):
- result = import_optional_dependency("fakemodule.submodule", errors="warn")
- assert result is None
-
- module.__version__ = "1.0.0" # exact match is OK
- result = import_optional_dependency("fakemodule.submodule")
- assert result is submodule
-
-
-def test_no_version_raises(monkeypatch):
- name = "fakemodule"
- module = types.ModuleType(name)
- sys.modules[name] = module
- monkeypatch.setitem(VERSIONS, name, "1.0.0")
-
- with pytest.raises(ImportError, match="Can't determine .* fakemodule"):
- import_optional_dependency(name)
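A minimal sketch of the import_optional_dependency contract the deleted tests rely on (this is a private pandas helper, so treat the call below as illustrative rather than a stable API):

    from pandas.compat._optional import import_optional_dependency

    # errors="raise" (the default) surfaces a helpful ImportError for a missing package...
    try:
        import_optional_dependency("notapackage")
    except ImportError:
        pass

    # ...while errors="ignore" returns None so callers can degrade gracefully.
    assert import_optional_dependency("notapackage", errors="ignore") is None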
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/link.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/link.py
deleted file mode 100644
index 6069b278b9bcbf64f1552c932ab909690bb7c149..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/link.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import functools
-import logging
-import os
-import posixpath
-import re
-import urllib.parse
-from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Tuple, Union
-
-from pip._internal.utils.filetypes import WHEEL_EXTENSION
-from pip._internal.utils.hashes import Hashes
-from pip._internal.utils.misc import (
- redact_auth_from_url,
- split_auth_from_netloc,
- splitext,
-)
-from pip._internal.utils.models import KeyBasedCompareMixin
-from pip._internal.utils.urls import path_to_url, url_to_path
-
-if TYPE_CHECKING:
- from pip._internal.index.collector import HTMLPage
-
-logger = logging.getLogger(__name__)
-
-
-_SUPPORTED_HASHES = ("sha1", "sha224", "sha384", "sha256", "sha512", "md5")
-
-
-class Link(KeyBasedCompareMixin):
- """Represents a parsed link from a Package Index's simple URL"""
-
- __slots__ = [
- "_parsed_url",
- "_url",
- "comes_from",
- "requires_python",
- "yanked_reason",
- "cache_link_parsing",
- ]
-
- def __init__(
- self,
- url: str,
- comes_from: Optional[Union[str, "HTMLPage"]] = None,
- requires_python: Optional[str] = None,
- yanked_reason: Optional[str] = None,
- cache_link_parsing: bool = True,
- ) -> None:
- """
- :param url: url of the resource pointed to (href of the link)
- :param comes_from: instance of HTMLPage where the link was found,
- or string.
- :param requires_python: String containing the `Requires-Python`
- metadata field, specified in PEP 345. This may be specified by
- a data-requires-python attribute in the HTML link tag, as
- described in PEP 503.
- :param yanked_reason: the reason the file has been yanked, if the
- file has been yanked, or None if the file hasn't been yanked.
- This is the value of the "data-yanked" attribute, if present, in
- a simple repository HTML link. If the file has been yanked but
- no reason was provided, this should be the empty string. See
- PEP 592 for more information and the specification.
- :param cache_link_parsing: A flag that is used elsewhere to determine
- whether resources retrieved from this link
- should be cached. PyPI index urls should
- generally have this set to False, for
- example.
- """
-
- # url can be a UNC windows share
- if url.startswith("\\\\"):
- url = path_to_url(url)
-
- self._parsed_url = urllib.parse.urlsplit(url)
- # Store the url as a private attribute to prevent accidentally
- # trying to set a new value.
- self._url = url
-
- self.comes_from = comes_from
- self.requires_python = requires_python if requires_python else None
- self.yanked_reason = yanked_reason
-
- super().__init__(key=url, defining_class=Link)
-
- self.cache_link_parsing = cache_link_parsing
-
- def __str__(self) -> str:
- if self.requires_python:
- rp = f" (requires-python:{self.requires_python})"
- else:
- rp = ""
- if self.comes_from:
- return "{} (from {}){}".format(
- redact_auth_from_url(self._url), self.comes_from, rp
- )
- else:
- return redact_auth_from_url(str(self._url))
-
- def __repr__(self) -> str:
- return f""
-
- @property
- def url(self) -> str:
- return self._url
-
- @property
- def filename(self) -> str:
- path = self.path.rstrip("/")
- name = posixpath.basename(path)
- if not name:
- # Make sure we don't leak auth information if the netloc
- # includes a username and password.
- netloc, user_pass = split_auth_from_netloc(self.netloc)
- return netloc
-
- name = urllib.parse.unquote(name)
- assert name, f"URL {self._url!r} produced no filename"
- return name
-
- @property
- def file_path(self) -> str:
- return url_to_path(self.url)
-
- @property
- def scheme(self) -> str:
- return self._parsed_url.scheme
-
- @property
- def netloc(self) -> str:
- """
- This can contain auth information.
- """
- return self._parsed_url.netloc
-
- @property
- def path(self) -> str:
- return urllib.parse.unquote(self._parsed_url.path)
-
- def splitext(self) -> Tuple[str, str]:
- return splitext(posixpath.basename(self.path.rstrip("/")))
-
- @property
- def ext(self) -> str:
- return self.splitext()[1]
-
- @property
- def url_without_fragment(self) -> str:
- scheme, netloc, path, query, fragment = self._parsed_url
- return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
-
- _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")
-
- @property
- def egg_fragment(self) -> Optional[str]:
- match = self._egg_fragment_re.search(self._url)
- if not match:
- return None
- return match.group(1)
-
- _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")
-
- @property
- def subdirectory_fragment(self) -> Optional[str]:
- match = self._subdirectory_fragment_re.search(self._url)
- if not match:
- return None
- return match.group(1)
-
- _hash_re = re.compile(
- r"({choices})=([a-f0-9]+)".format(choices="|".join(_SUPPORTED_HASHES))
- )
-
- @property
- def hash(self) -> Optional[str]:
- match = self._hash_re.search(self._url)
- if match:
- return match.group(2)
- return None
-
- @property
- def hash_name(self) -> Optional[str]:
- match = self._hash_re.search(self._url)
- if match:
- return match.group(1)
- return None
-
- @property
- def show_url(self) -> str:
- return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])
-
- @property
- def is_file(self) -> bool:
- return self.scheme == "file"
-
- def is_existing_dir(self) -> bool:
- return self.is_file and os.path.isdir(self.file_path)
-
- @property
- def is_wheel(self) -> bool:
- return self.ext == WHEEL_EXTENSION
-
- @property
- def is_vcs(self) -> bool:
- from pip._internal.vcs import vcs
-
- return self.scheme in vcs.all_schemes
-
- @property
- def is_yanked(self) -> bool:
- return self.yanked_reason is not None
-
- @property
- def has_hash(self) -> bool:
- return self.hash_name is not None
-
- def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
- """
- Return True if the link has a hash and it is allowed.
- """
- if hashes is None or not self.has_hash:
- return False
- # Assert non-None so mypy knows self.hash_name and self.hash are str.
- assert self.hash_name is not None
- assert self.hash is not None
-
- return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
-
-
-class _CleanResult(NamedTuple):
- """Convert link for equivalency check.
-
- This is used in the resolver to check whether two URL-specified requirements
- likely point to the same distribution and can be considered equivalent. This
- equivalency logic avoids comparing URLs literally, which can be too strict
- (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.
-
- Currently this does three things:
-
- 1. Drop the basic auth part. This is technically wrong since a server can
- serve different content based on auth, but if it does that, it is even
- impossible to guarantee two URLs without auth are equivalent, since
- the user can input different auth information when prompted. So the
- practical solution is to assume the auth doesn't affect the response.
- 2. Parse the query to avoid the ordering issue. Note that ordering under the
- same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
- still considered different.
- 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
- hash values, since it should have no impact on the downloaded content. Note
- that this drops the "egg=" part historically used to denote the requested
- project (and extras), which is wrong in the strictest sense, but too many
- people are supplying it inconsistently to cause superfluous resolution
- conflicts, so we choose to also ignore them.
- """
-
- parsed: urllib.parse.SplitResult
- query: Dict[str, List[str]]
- subdirectory: str
- hashes: Dict[str, str]
-
-
-def _clean_link(link: Link) -> _CleanResult:
- parsed = link._parsed_url
- netloc = parsed.netloc.rsplit("@", 1)[-1]
- # According to RFC 8089, an empty host in file: means localhost.
- if parsed.scheme == "file" and not netloc:
- netloc = "localhost"
- fragment = urllib.parse.parse_qs(parsed.fragment)
- if "egg" in fragment:
- logger.debug("Ignoring egg= fragment in %s", link)
- try:
- # If there are multiple subdirectory values, use the first one.
- # This matches the behavior of Link.subdirectory_fragment.
- subdirectory = fragment["subdirectory"][0]
- except (IndexError, KeyError):
- subdirectory = ""
- # If there are multiple hash values under the same algorithm, use the
- # first one. This matches the behavior of Link.hash_value.
- hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
- return _CleanResult(
- parsed=parsed._replace(netloc=netloc, query="", fragment=""),
- query=urllib.parse.parse_qs(parsed.query),
- subdirectory=subdirectory,
- hashes=hashes,
- )
-
-
-@functools.lru_cache(maxsize=None)
-def links_equivalent(link1: Link, link2: Link) -> bool:
- return _clean_link(link1) == _clean_link(link2)
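A short sketch of the equivalency behavior _clean_link/links_equivalent implement (pip-internal API from this deleted vendored copy; the URLs below are made up for illustration):

    from pip._internal.models.link import Link, links_equivalent

    # Basic-auth credentials and query-parameter order are ignored by the check.
    a = Link("https://example.com/pkg-1.0.tar.gz?a=1&b=2")
    b = Link("https://user:secret@example.com/pkg-1.0.tar.gz?b=2&a=1")
    assert links_equivalent(a, b)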
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/colorama/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/colorama/__init__.py
deleted file mode 100644
index b149ed79b0a1d5808a7e392876c2f5aae4b5057c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/colorama/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-from .initialise import init, deinit, reinit, colorama_text
-from .ansi import Fore, Back, Style, Cursor
-from .ansitowin32 import AnsiToWin32
-
-__version__ = '0.4.4'
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/foxpro.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/foxpro.py
deleted file mode 100644
index 9d8d951c585f517714dfd02c4c75bb77a19b56da..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/foxpro.py
+++ /dev/null
@@ -1,427 +0,0 @@
-"""
- pygments.lexers.foxpro
- ~~~~~~~~~~~~~~~~~~~~~~
-
- Simple lexer for Microsoft Visual FoxPro source code.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import re
-
-from pygments.lexer import RegexLexer
-from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
- Name, String
-
-__all__ = ['FoxProLexer']
-
-
-class FoxProLexer(RegexLexer):
- """Lexer for Microsoft Visual FoxPro language.
-
- FoxPro syntax allows all keywords and function names to be shortened
- to 4 characters. Shortened forms are not recognized by this lexer.
-
- .. versionadded:: 1.6
- """
-
- name = 'FoxPro'
- aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
- filenames = ['*.PRG', '*.prg']
- mimetype = []
-
- flags = re.IGNORECASE | re.MULTILINE
-
- tokens = {
- 'root': [
- (r';\s*\n', Punctuation), # consume newline
- (r'(^|\n)\s*', Text, 'newline'),
-
- # Square brackets may be used for array indices
- # and for string literal. Look for arrays
- # before matching string literals.
- (r'(?<=\w)\[[0-9, ]+\]', Text),
- (r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
- (r'(^\s*\*|&&|&&).*?\n', Comment.Single),
-
- (r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
- r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
- r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
- r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
- r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
- r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
- r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
- r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
- r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
- r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
- r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
- r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
- r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
- r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
- r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
- r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
- r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
- r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
- r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
- r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
- r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
- r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
- r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
- r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
- r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
- r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
- r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
- r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
- r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
- r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
- r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
- r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
- r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
- r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
- r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
- r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
- r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
- r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
- r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
- r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
- r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
- r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
- r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
- r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
- r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
- r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
- r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
- r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
- r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
- r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
- r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
- r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
- r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
- r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
- r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
- r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
- r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
- r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
- r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
- r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
- r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
- r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
- r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
- r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
- r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
- r'YEAR)(?=\s*\()', Name.Function),
-
- (r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
- r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
- r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
- r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
- r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
- r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
- r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
- r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
- r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
- r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
- r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
- r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
- r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
-
- (r'THISFORMSET|THISFORM|THIS', Name.Builtin),
-
- (r'Application|CheckBox|Collection|Column|ComboBox|'
- r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
- r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
- r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
- r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
- r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
- r'Project|Relation|ReportListener|Separator|Servers|Server|'
- r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
- r'XMLAdapter|XMLField|XMLTable', Name.Class),
-
- (r'm\.[a-z_]\w*', Name.Variable),
- (r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
-
- (r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
- r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
- r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
- r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
- r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
- r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
- r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
- r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
- r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
- r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
- r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
- r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
- r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
- r'BreakOnError|BufferModeOverride|BufferMode|'
- r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
- r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
- r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
- r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
- r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
- r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
- r'ContinuousScroll|ControlBox|ControlCount|Controls|'
- r'ControlSource|ConversionFunc|Count|CurrentControl|'
- r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
- r'CursorSchema|CursorSource|CursorStatus|Curvature|'
- r'Database|DataSessionID|DataSession|DataSourceType|'
- r'DataSource|DataType|DateFormat|DateMark|Debug|'
- r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
- r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
- r'DeleteCmd|DeleteMark|Description|Desktop|'
- r'Details|DisabledBackColor|DisabledForeColor|'
- r'DisabledItemBackColor|DisabledItemForeColor|'
- r'DisabledPicture|DisableEncode|DisplayCount|'
- r'DisplayValue|Dockable|Docked|DockPosition|'
- r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
- r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
- r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
- r'DynamicFontItalic|DynamicFontStrikethru|'
- r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
- r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
- r'DynamicLineHeight|EditorOptions|Enabled|'
- r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
- r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
- r'FetchMemoDataSource|FetchMemo|FetchSize|'
- r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
- r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
- r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
- r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
- r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
- r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
- r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
- r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
- r'HelpContextID|HideSelection|HighlightBackColor|'
- r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
- r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
- r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
- r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
- r'InsertCmdDataSource|InsertCmdRefreshCmd|'
- r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
- r'InsertCmd|Instancing|IntegralHeight|'
- r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
- r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
- r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
- r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
- r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
- r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
- r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
- r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
- r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
- r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
- r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
- r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
- r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
- r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
- r'NumberOfElements|Object|OLEClass|OLEDragMode|'
- r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
- r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
- r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
- r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
- r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
- r'OutputPageCount|OutputType|PageCount|PageHeight|'
- r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
- r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
- r'Parent|Partition|PasswordChar|PictureMargin|'
- r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
- r'PictureVal|Picture|Prepared|'
- r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
- r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
- r'ProjectHookLibrary|ProjectHook|QuietMode|'
- r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
- r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
- r'RecordSource|RefreshAlias|'
- r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
- r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
- r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
- r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
- r'Rotation|RowColChange|RowHeight|RowSourceType|'
- r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
- r'Seconds|SelectCmd|SelectedID|'
- r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
- r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
- r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
- r'ServerClass|ServerHelpFile|ServerName|'
- r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
- r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
- r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
- r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
- r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
- r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
- r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
- r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
- r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
- r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
- r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
- r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
- r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
- r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
- r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
- r'VersionCompany|VersionCopyright|VersionDescription|'
- r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
- r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
- r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
- r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
- r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
- r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
- r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
- r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
- r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
- r'XSDtype|ZoomBox)', Name.Attribute),
-
- (r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
- r'AddProperty|AddTableSchema|AddToSCC|Add|'
- r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
- r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
- r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
- r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
- r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
- r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
- r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
- r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
- r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
- r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
- r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
- r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
- r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
- r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
- r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
- r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
- r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
- r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
- r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
-
- (r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
- r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
- r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
- r'AfterCursorUpdate|AfterDelete|AfterInsert|'
- r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
- r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
- r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
- r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
- r'BeforeInsert|BeforeDock|BeforeOpenTables|'
- r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
- r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
- r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
- r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
- r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
- r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
- r'dbc_AfterDropOffline|dbc_AfterDropTable|'
- r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
- r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
- r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
- r'dbc_AfterRenameTable|dbc_AfterRenameView|'
- r'dbc_AfterValidateData|dbc_BeforeAddTable|'
- r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
- r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
- r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
- r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
- r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
- r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
- r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
- r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
- r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
- r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
- r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
- r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
- r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
- r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
- r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
- r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
- r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
- r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
- r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
- r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
- r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
- r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
- r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
- r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
-
- (r'\s+', Text),
- # everything else is not colored
- (r'.', Text),
- ],
- 'newline': [
- (r'\*.*?$', Comment.Single, '#pop'),
- (r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
- r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
- r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
- r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
- r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
- r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
- r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
- r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
- r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
- r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
- r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
- r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
- r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
- r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
- r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
- r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
- r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
- r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
- r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
- r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
- r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
- r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
- r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
- r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
- r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
- r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
- r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
- r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
- r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
- r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
- r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
- r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
- r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
- r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
- r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
- r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
- r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
- r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
- r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
- r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
- r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
- r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
- r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
- r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
- r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
- r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
- r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
- r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
- r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
- r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
- r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
- r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
- r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
- r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
- r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
- r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
- r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
- r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
- r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
- r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
- r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
- r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
- r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
- r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
- r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
- r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
- r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
- r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
- r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
- r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
- r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
- r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
- r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
- r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
- Keyword.Reserved, '#pop'),
- (r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
- Comment.Preproc, '#pop'),
- (r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
- (r'.', Text, '#pop'),
- ],
- }
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/contrib/_securetransport/low_level.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/contrib/_securetransport/low_level.py
deleted file mode 100644
index e23569972c7a541774366a01eea05e066b19c406..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/contrib/_securetransport/low_level.py
+++ /dev/null
@@ -1,474 +0,0 @@
-"""
-Low-level helpers for the SecureTransport bindings.
-
-These are Python functions that are not directly related to the high-level APIs
-but are necessary to get them to work. They include a whole bunch of low-level
-CoreFoundation messing about and memory management. The concerns in this module
-are almost entirely about trying to avoid memory leaks and providing
-appropriate and useful assistance to the higher-level code.
-"""
-from __future__ import annotations
-
-import base64
-import ctypes
-import itertools
-import os
-import re
-import ssl
-import struct
-import tempfile
-import typing
-
-from .bindings import ( # type: ignore[attr-defined]
- CFArray,
- CFConst,
- CFData,
- CFDictionary,
- CFMutableArray,
- CFString,
- CFTypeRef,
- CoreFoundation,
- SecKeychainRef,
- Security,
-)
-
-# This regular expression is used to grab PEM data out of a PEM bundle.
-_PEM_CERTS_RE = re.compile(
- b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
-)
-
-
-def _cf_data_from_bytes(bytestring: bytes) -> CFData:
- """
- Given a bytestring, create a CFData object from it. This CFData object must
- be CFReleased by the caller.
- """
- return CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
- )
-
-
-def _cf_dictionary_from_tuples(
- tuples: list[tuple[typing.Any, typing.Any]]
-) -> CFDictionary:
- """
- Given a list of Python tuples, create an associated CFDictionary.
- """
- dictionary_size = len(tuples)
-
- # We need to get the dictionary keys and values out in the same order.
- keys = (t[0] for t in tuples)
- values = (t[1] for t in tuples)
- cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
- cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
-
- return CoreFoundation.CFDictionaryCreate(
- CoreFoundation.kCFAllocatorDefault,
- cf_keys,
- cf_values,
- dictionary_size,
- CoreFoundation.kCFTypeDictionaryKeyCallBacks,
- CoreFoundation.kCFTypeDictionaryValueCallBacks,
- )
-
-
-def _cfstr(py_bstr: bytes) -> CFString:
- """
- Given a Python binary data, create a CFString.
- The string must be CFReleased by the caller.
- """
- c_str = ctypes.c_char_p(py_bstr)
- cf_str = CoreFoundation.CFStringCreateWithCString(
- CoreFoundation.kCFAllocatorDefault,
- c_str,
- CFConst.kCFStringEncodingUTF8,
- )
- return cf_str
-
-
-def _create_cfstring_array(lst: list[bytes]) -> CFMutableArray:
- """
- Given a list of Python binary data, create an associated CFMutableArray.
- The array must be CFReleased by the caller.
-
- Raises an ssl.SSLError on failure.
- """
- cf_arr = None
- try:
- cf_arr = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- if not cf_arr:
- raise MemoryError("Unable to allocate memory!")
- for item in lst:
- cf_str = _cfstr(item)
- if not cf_str:
- raise MemoryError("Unable to allocate memory!")
- try:
- CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
- finally:
- CoreFoundation.CFRelease(cf_str)
- except BaseException as e:
- if cf_arr:
- CoreFoundation.CFRelease(cf_arr)
- raise ssl.SSLError(f"Unable to allocate array: {e}") from None
- return cf_arr
-
-
-def _cf_string_to_unicode(value: CFString) -> str | None:
- """
- Creates a Unicode string from a CFString object. Used entirely for error
- reporting.
-
- Yes, it annoys me quite a lot that this function is this complex.
- """
- value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
-
- string = CoreFoundation.CFStringGetCStringPtr(
- value_as_void_p, CFConst.kCFStringEncodingUTF8
- )
- if string is None:
- buffer = ctypes.create_string_buffer(1024)
- result = CoreFoundation.CFStringGetCString(
- value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
- )
- if not result:
- raise OSError("Error copying C string from CFStringRef")
- string = buffer.value
- if string is not None:
- string = string.decode("utf-8")
- return string # type: ignore[no-any-return]
-
-
-def _assert_no_error(
- error: int, exception_class: type[BaseException] | None = None
-) -> None:
- """
- Checks the return code and throws an exception if there is an error to
- report
- """
- if error == 0:
- return
-
- cf_error_string = Security.SecCopyErrorMessageString(error, None)
- output = _cf_string_to_unicode(cf_error_string)
- CoreFoundation.CFRelease(cf_error_string)
-
- if output is None or output == "":
- output = f"OSStatus {error}"
-
- if exception_class is None:
- exception_class = ssl.SSLError
-
- raise exception_class(output)
-
-
-def _cert_array_from_pem(pem_bundle: bytes) -> CFArray:
- """
- Given a bundle of certs in PEM format, turns them into a CFArray of certs
- that can be used to validate a cert chain.
- """
- # Normalize the PEM bundle's line endings.
- pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
-
- der_certs = [
- base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
- ]
- if not der_certs:
- raise ssl.SSLError("No root certificates specified")
-
- cert_array = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- if not cert_array:
- raise ssl.SSLError("Unable to allocate memory!")
-
- try:
- for der_bytes in der_certs:
- certdata = _cf_data_from_bytes(der_bytes)
- if not certdata:
- raise ssl.SSLError("Unable to allocate memory!")
- cert = Security.SecCertificateCreateWithData(
- CoreFoundation.kCFAllocatorDefault, certdata
- )
- CoreFoundation.CFRelease(certdata)
- if not cert:
- raise ssl.SSLError("Unable to build cert object!")
-
- CoreFoundation.CFArrayAppendValue(cert_array, cert)
- CoreFoundation.CFRelease(cert)
- except Exception:
- # We need to free the array before the exception bubbles further.
- # We only want to do that if an error occurs: otherwise, the caller
- # should free.
- CoreFoundation.CFRelease(cert_array)
- raise
-
- return cert_array
-
-
-def _is_cert(item: CFTypeRef) -> bool:
- """
- Returns True if a given CFTypeRef is a certificate.
- """
- expected = Security.SecCertificateGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected # type: ignore[no-any-return]
-
-
-def _is_identity(item: CFTypeRef) -> bool:
- """
- Returns True if a given CFTypeRef is an identity.
- """
- expected = Security.SecIdentityGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected # type: ignore[no-any-return]
-
-
-def _temporary_keychain() -> tuple[SecKeychainRef, str]:
- """
- This function creates a temporary Mac keychain that we can use to work with
- credentials. This keychain uses a one-time password and a temporary file to
- store the data. We expect to have one keychain per socket. The returned
- SecKeychainRef must be freed by the caller, including calling
- SecKeychainDelete.
-
- Returns a tuple of the SecKeychainRef and the path to the temporary
- directory that contains it.
- """
- # Unfortunately, SecKeychainCreate requires a path to a keychain. This
- # means we cannot use mkstemp to use a generic temporary file. Instead,
- # we're going to create a temporary directory and a filename to use there.
- # This filename will be 8 random bytes expanded into base64. We also need
- # some random bytes to password-protect the keychain we're creating, so we
- # ask for 40 random bytes.
- random_bytes = os.urandom(40)
- filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
- password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
- tempdirectory = tempfile.mkdtemp()
-
- keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
-
- # We now want to create the keychain itself.
- keychain = Security.SecKeychainRef()
- status = Security.SecKeychainCreate(
- keychain_path, len(password), password, False, None, ctypes.byref(keychain)
- )
- _assert_no_error(status)
-
- # Having created the keychain, we want to pass it off to the caller.
- return keychain, tempdirectory
-
-
-def _load_items_from_file(
- keychain: SecKeychainRef, path: str
-) -> tuple[list[CFTypeRef], list[CFTypeRef]]:
- """
- Given a single file, loads all the trust objects from it into arrays and
- the keychain.
- Returns a tuple of lists: the first list is a list of identities, the
- second a list of certs.
- """
- certificates = []
- identities = []
- result_array = None
-
- with open(path, "rb") as f:
- raw_filedata = f.read()
-
- try:
- filedata = CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
- )
- result_array = CoreFoundation.CFArrayRef()
- result = Security.SecItemImport(
- filedata, # cert data
- None, # Filename, leaving it out for now
- None, # What the type of the file is, we don't care
- None, # what's in the file, we don't care
- 0, # import flags
- None, # key params, can include passphrase in the future
- keychain, # The keychain to insert into
- ctypes.byref(result_array), # Results
- )
- _assert_no_error(result)
-
- # A CFArray is not very useful to us as an intermediary
- # representation, so we are going to extract the objects we want
- # and then free the array. We don't need to keep hold of keys: the
- # keychain already has them!
- result_count = CoreFoundation.CFArrayGetCount(result_array)
- for index in range(result_count):
- item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
- item = ctypes.cast(item, CoreFoundation.CFTypeRef)
-
- if _is_cert(item):
- CoreFoundation.CFRetain(item)
- certificates.append(item)
- elif _is_identity(item):
- CoreFoundation.CFRetain(item)
- identities.append(item)
- finally:
- if result_array:
- CoreFoundation.CFRelease(result_array)
-
- CoreFoundation.CFRelease(filedata)
-
- return (identities, certificates)
-
-
-def _load_client_cert_chain(keychain: SecKeychainRef, *paths: str | None) -> CFArray:
- """
- Load certificates and maybe keys from a number of files. Has the end goal
- of returning a CFArray containing one SecIdentityRef, and then zero or more
- SecCertificateRef objects, suitable for use as a client certificate trust
- chain.
- """
- # Ok, the strategy.
- #
- # This relies on knowing that macOS will not give you a SecIdentityRef
- # unless you have imported a key into a keychain. This is a somewhat
- # artificial limitation of macOS (for example, it doesn't necessarily
- # affect iOS), but there is nothing inside Security.framework that lets you
- # get a SecIdentityRef without having a key in a keychain.
- #
- # So the policy here is we take all the files and iterate them in order.
- # Each one will use SecItemImport to have one or more objects loaded from
- # it. We will also point at a keychain that macOS can use to work with the
- # private key.
- #
- # Once we have all the objects, we'll check what we actually have. If we
- # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
- # we'll take the first certificate (which we assume to be our leaf) and
- # ask the keychain to give us a SecIdentityRef with that cert's associated
- # key.
- #
- # We'll then return a CFArray containing the trust chain: one
- # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
- # responsibility for freeing this CFArray will be with the caller. This
- # CFArray must remain alive for the entire connection, so in practice it
- # will be stored with a single SSLSocket, along with the reference to the
- # keychain.
- certificates = []
- identities = []
-
- # Filter out bad paths.
- filtered_paths = (path for path in paths if path)
-
- try:
- for file_path in filtered_paths:
- new_identities, new_certs = _load_items_from_file(keychain, file_path)
- identities.extend(new_identities)
- certificates.extend(new_certs)
-
- # Ok, we have everything. The question is: do we have an identity? If
- # not, we want to grab one from the first cert we have.
- if not identities:
- new_identity = Security.SecIdentityRef()
- status = Security.SecIdentityCreateWithCertificate(
- keychain, certificates[0], ctypes.byref(new_identity)
- )
- _assert_no_error(status)
- identities.append(new_identity)
-
- # We now want to release the original certificate, as we no longer
- # need it.
- CoreFoundation.CFRelease(certificates.pop(0))
-
- # We now need to build a new CFArray that holds the trust chain.
- trust_chain = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- for item in itertools.chain(identities, certificates):
- # ArrayAppendValue does a CFRetain on the item. That's fine,
- # because the finally block will release our other refs to them.
- CoreFoundation.CFArrayAppendValue(trust_chain, item)
-
- return trust_chain
- finally:
- for obj in itertools.chain(identities, certificates):
- CoreFoundation.CFRelease(obj)
-
-
-TLS_PROTOCOL_VERSIONS = {
- "SSLv2": (0, 2),
- "SSLv3": (3, 0),
- "TLSv1": (3, 1),
- "TLSv1.1": (3, 2),
- "TLSv1.2": (3, 3),
-}
-
-
-def _build_tls_unknown_ca_alert(version: str) -> bytes:
- """
- Builds a TLS alert record for an unknown CA.
- """
- ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
- severity_fatal = 0x02
- description_unknown_ca = 0x30
- msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
- msg_len = len(msg)
- record_type_alert = 0x15
- record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
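-    # For example, for "TLSv1.2" this yields b"\x15\x03\x03\x00\x02\x02\x30":
-    # alert record type 0x15, version 3.3, 2-byte payload (fatal, unknown_ca).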
- return record
-
-
-class SecurityConst:
- """
- A class object that acts as essentially a namespace for Security constants.
- """
-
- kSSLSessionOptionBreakOnServerAuth = 0
-
- kSSLProtocol2 = 1
- kSSLProtocol3 = 2
- kTLSProtocol1 = 4
- kTLSProtocol11 = 7
- kTLSProtocol12 = 8
- # SecureTransport does not support TLS 1.3 even if there's a constant for it
- kTLSProtocol13 = 10
- kTLSProtocolMaxSupported = 999
-
- kSSLClientSide = 1
- kSSLStreamType = 0
-
- kSecFormatPEMSequence = 10
-
- kSecTrustResultInvalid = 0
- kSecTrustResultProceed = 1
- # This gap is present on purpose: this was kSecTrustResultConfirm, which
- # is deprecated.
- kSecTrustResultDeny = 3
- kSecTrustResultUnspecified = 4
- kSecTrustResultRecoverableTrustFailure = 5
- kSecTrustResultFatalTrustFailure = 6
- kSecTrustResultOtherError = 7
-
- errSSLProtocol = -9800
- errSSLWouldBlock = -9803
- errSSLClosedGraceful = -9805
- errSSLClosedNoNotify = -9816
- errSSLClosedAbort = -9806
-
- errSSLXCertChainInvalid = -9807
- errSSLCrypto = -9809
- errSSLInternal = -9810
- errSSLCertExpired = -9814
- errSSLCertNotYetValid = -9815
- errSSLUnknownRootCert = -9812
- errSSLNoRootCert = -9813
- errSSLHostNameMismatch = -9843
- errSSLPeerHandshakeFail = -9824
- errSSLPeerUserCancelled = -9839
- errSSLWeakPeerEphemeralDHKey = -9850
- errSSLServerAuthCompleted = -9841
- errSSLRecordOverflow = -9847
-
- errSecVerifyFailed = -67808
- errSecNoTrustSettings = -25263
- errSecItemNotFound = -25300
- errSecInvalidTrustSettings = -25262
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yarl/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yarl/__init__.py
deleted file mode 100644
index b92ac79d92bae26875bf9e0f715a0fdb259d33e6..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/yarl/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from ._url import URL, cache_clear, cache_configure, cache_info
-
-__version__ = "1.9.2"
-
-__all__ = ("URL", "cache_clear", "cache_configure", "cache_info")
diff --git a/spaces/pseudolab/MistralMED_Chat/app.py b/spaces/pseudolab/MistralMED_Chat/app.py
deleted file mode 100644
index 3af1e8df0ba3843f0485eb5238d78a9337fcac4c..0000000000000000000000000000000000000000
--- a/spaces/pseudolab/MistralMED_Chat/app.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, MistralForCausalLM
-from peft import PeftModel, PeftConfig
-import torch
-import gradio as gr
-import random
-import textwrap
-
-# Functions to Wrap the Prompt Correctly
-def wrap_text(text, width=90):
- lines = text.split('\n')
- wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
- wrapped_text = '\n'.join(wrapped_lines)
- return wrapped_text
-
-def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:", max_length=512):  # max_length default matches ChatBot.predict below
- """
- Generates text using a large language model, given a user input and a system prompt.
- Args:
- user_input: The user's input text to generate a response for.
- system_prompt: Optional system prompt.
- Returns:
- A string containing the generated text.
- """
- # Combine user input and system prompt
- formatted_input = f"[INST]{system_prompt} {user_input}[/INST]"
-
- # Encode the input text
- encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
- model_inputs = encodeds.to(device)
-
- # Generate a response using the model
-    output = peft_model.generate(  # generate with the PEFT model loaded below
- **model_inputs,
- max_length=max_length,
- use_cache=True,
- early_stopping=True,
- bos_token_id=model.config.bos_token_id,
- eos_token_id=model.config.eos_token_id,
- pad_token_id=model.config.eos_token_id,
- temperature=0.1,
- do_sample=True
- )
-
- # Decode the response
- response_text = tokenizer.decode(output[0], skip_special_tokens=True)
-
- return response_text
-
-# Define the device
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# Use the base model's ID
-base_model_id = "mistralai/Mistral-7B-v0.1"
-model_directory = "Tonic/mistralmed"
-
-# Instantiate the Tokenizer
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True, padding_side="left")
-# tokenizer = AutoTokenizer.from_pretrained("Tonic/mistralmed", trust_remote_code=True, padding_side="left")
-tokenizer.pad_token = tokenizer.eos_token
-tokenizer.padding_side = 'left'
-
-# Specify the configuration class for the model
-#model_config = AutoConfig.from_pretrained(base_model_id)
-
-# Load the PEFT model with the specified configuration
-#peft_model = AutoModelForCausalLM.from_pretrained(base_model_id, config=model_config)
-
-# Load the PEFT model
-peft_config = PeftConfig.from_pretrained("Tonic/mistralmed", token="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")
-peft_model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)
-peft_model = PeftModel.from_pretrained(peft_model, "Tonic/mistralmed", token="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")
-
-class ChatBot:
- def __init__(self):
- # Initialize the ChatBot class with an empty history
- self.history = []
-
- def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
- # Combine the user's input with the system prompt
- formatted_input = f"[INST]{system_prompt} {user_input}[/INST]"
-
- # Encode the formatted input using the tokenizer
- user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
-
- # Generate a response using the PEFT model
- response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
-
- # Decode the generated response to text
- response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-
- return response_text # Return the generated response
-
-bot = ChatBot()
-
-title = "👋🏻토닉의 미스트랄메드 채팅에 오신 것을 환영합니다🚀👋🏻Welcome to Tonic's MistralMed Chat🚀"
-description = "이 공간을 사용하여 현재 모델을 테스트할 수 있습니다. [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) 또는 이 공간을 복제하고 로컬 또는 🤗HuggingFace에서 사용할 수 있습니다. [Discord에서 함께 만들기 위해 Discord에 가입하십시오](https://discord.gg/VqTxc76K3u). You can use this Space to test out the current model [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) or duplicate this Space and use it locally or on 🤗HuggingFace. [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
-examples = [["[Question:] What is the proper treatment for buccal herpes?", "You are a medicine and public health expert, you will receive a question, answer the question, and provide a complete answer"]]
-
-iface = gr.Interface(
- fn=bot.predict,
- title=title,
- description=description,
- examples=examples,
- inputs=["text", "text"], # Take user input and system prompt separately
- outputs="text",
- theme="ParityError/Anime"
-)
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/pyInter/Liyuu_sovits4/vdecoder/__init__.py b/spaces/pyInter/Liyuu_sovits4/vdecoder/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/dqn.py b/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/dqn.py
deleted file mode 100644
index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000
--- a/spaces/qingxu98/academic-chatgpt-beta/crazy_functions/test_project/python/dqn/dqn.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
-
-import gym
-import numpy as np
-import torch as th
-from torch.nn import functional as F
-
-from stable_baselines3.common import logger
-from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
-from stable_baselines3.common.preprocessing import maybe_transpose
-from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
-from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
-from stable_baselines3.dqn.policies import DQNPolicy
-
-
-class DQN(OffPolicyAlgorithm):
- """
- Deep Q-Network (DQN)
-
- Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
- Default hyperparameters are taken from the nature paper,
- except for the optimizer and learning rate that were taken from Stable Baselines defaults.
-
- :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
- :param env: The environment to learn from (if registered in Gym, can be str)
- :param learning_rate: The learning rate, it can be a function
- of the current progress remaining (from 1 to 0)
- :param buffer_size: size of the replay buffer
- :param learning_starts: how many steps of the model to collect transitions for before learning starts
- :param batch_size: Minibatch size for each gradient update
- :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
- :param gamma: the discount factor
- :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
- like ``(5, "step")`` or ``(2, "episode")``.
- :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
- Set to ``-1`` means to do as many gradient steps as steps done in the environment
- during the rollout.
- :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
- at a cost of more complexity.
- See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
- :param target_update_interval: update the target network every ``target_update_interval``
- environment steps.
- :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
- :param exploration_initial_eps: initial value of random action probability
- :param exploration_final_eps: final value of random action probability
- :param max_grad_norm: The maximum value for the gradient clipping
- :param tensorboard_log: the log location for tensorboard (if None, no logging)
- :param create_eval_env: Whether to create a second environment that will be
- used for evaluating the agent periodically. (Only available when passing string for the environment)
- :param policy_kwargs: additional arguments to be passed to the policy on creation
- :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
- :param seed: Seed for the pseudo random generators
- :param device: Device (cpu, cuda, ...) on which the code should be run.
- Setting it to auto, the code will be run on the GPU if possible.
- :param _init_setup_model: Whether or not to build the network at the creation of the instance
- """
-
- def __init__(
- self,
- policy: Union[str, Type[DQNPolicy]],
- env: Union[GymEnv, str],
- learning_rate: Union[float, Schedule] = 1e-4,
- buffer_size: int = 1000000,
- learning_starts: int = 50000,
- batch_size: Optional[int] = 32,
- tau: float = 1.0,
- gamma: float = 0.99,
- train_freq: Union[int, Tuple[int, str]] = 4,
- gradient_steps: int = 1,
- optimize_memory_usage: bool = False,
- target_update_interval: int = 10000,
- exploration_fraction: float = 0.1,
- exploration_initial_eps: float = 1.0,
- exploration_final_eps: float = 0.05,
- max_grad_norm: float = 10,
- tensorboard_log: Optional[str] = None,
- create_eval_env: bool = False,
- policy_kwargs: Optional[Dict[str, Any]] = None,
- verbose: int = 0,
- seed: Optional[int] = None,
- device: Union[th.device, str] = "auto",
- _init_setup_model: bool = True,
- ):
-
- super(DQN, self).__init__(
- policy,
- env,
- DQNPolicy,
- learning_rate,
- buffer_size,
- learning_starts,
- batch_size,
- tau,
- gamma,
- train_freq,
- gradient_steps,
- action_noise=None, # No action noise
- policy_kwargs=policy_kwargs,
- tensorboard_log=tensorboard_log,
- verbose=verbose,
- device=device,
- create_eval_env=create_eval_env,
- seed=seed,
- sde_support=False,
- optimize_memory_usage=optimize_memory_usage,
- supported_action_spaces=(gym.spaces.Discrete,),
- )
-
- self.exploration_initial_eps = exploration_initial_eps
- self.exploration_final_eps = exploration_final_eps
- self.exploration_fraction = exploration_fraction
- self.target_update_interval = target_update_interval
- self.max_grad_norm = max_grad_norm
- # "epsilon" for the epsilon-greedy exploration
- self.exploration_rate = 0.0
- # Linear schedule will be defined in `_setup_model()`
- self.exploration_schedule = None
- self.q_net, self.q_net_target = None, None
-
- if _init_setup_model:
- self._setup_model()
-
- def _setup_model(self) -> None:
- super(DQN, self)._setup_model()
- self._create_aliases()
- self.exploration_schedule = get_linear_fn(
- self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
- )
-
- def _create_aliases(self) -> None:
- self.q_net = self.policy.q_net
- self.q_net_target = self.policy.q_net_target
-
- def _on_step(self) -> None:
- """
- Update the exploration rate and target network if needed.
- This method is called in ``collect_rollouts()`` after each step in the environment.
- """
- if self.num_timesteps % self.target_update_interval == 0:
- polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
-
- self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
- logger.record("rollout/exploration rate", self.exploration_rate)
-
- def train(self, gradient_steps: int, batch_size: int = 100) -> None:
- # Update learning rate according to schedule
- self._update_learning_rate(self.policy.optimizer)
-
- losses = []
- for _ in range(gradient_steps):
- # Sample replay buffer
- replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
-
- with th.no_grad():
- # Compute the next Q-values using the target network
- next_q_values = self.q_net_target(replay_data.next_observations)
- # Follow greedy policy: use the one with the highest value
- next_q_values, _ = next_q_values.max(dim=1)
- # Avoid potential broadcast issue
- next_q_values = next_q_values.reshape(-1, 1)
- # 1-step TD target
- target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
-
- # Get current Q-values estimates
- current_q_values = self.q_net(replay_data.observations)
-
- # Retrieve the q-values for the actions from the replay buffer
- current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
-
- # Compute Huber loss (less sensitive to outliers)
- loss = F.smooth_l1_loss(current_q_values, target_q_values)
- losses.append(loss.item())
-
- # Optimize the policy
- self.policy.optimizer.zero_grad()
- loss.backward()
- # Clip gradient norm
- th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
- self.policy.optimizer.step()
-
- # Increase update counter
- self._n_updates += gradient_steps
-
- logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
- logger.record("train/loss", np.mean(losses))
-
- def predict(
- self,
- observation: np.ndarray,
- state: Optional[np.ndarray] = None,
- mask: Optional[np.ndarray] = None,
- deterministic: bool = False,
- ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
- """
- Overrides the base_class predict function to include epsilon-greedy exploration.
-
- :param observation: the input observation
- :param state: The last states (can be None, used in recurrent policies)
- :param mask: The last masks (can be None, used in recurrent policies)
- :param deterministic: Whether or not to return deterministic actions.
- :return: the model's action and the next state
- (used in recurrent policies)
- """
- if not deterministic and np.random.rand() < self.exploration_rate:
- if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
- n_batch = observation.shape[0]
- action = np.array([self.action_space.sample() for _ in range(n_batch)])
- else:
- action = np.array(self.action_space.sample())
- else:
- action, state = self.policy.predict(observation, state, mask, deterministic)
- return action, state
-
- def learn(
- self,
- total_timesteps: int,
- callback: MaybeCallback = None,
- log_interval: int = 4,
- eval_env: Optional[GymEnv] = None,
- eval_freq: int = -1,
- n_eval_episodes: int = 5,
- tb_log_name: str = "DQN",
- eval_log_path: Optional[str] = None,
- reset_num_timesteps: bool = True,
- ) -> OffPolicyAlgorithm:
-
- return super(DQN, self).learn(
- total_timesteps=total_timesteps,
- callback=callback,
- log_interval=log_interval,
- eval_env=eval_env,
- eval_freq=eval_freq,
- n_eval_episodes=n_eval_episodes,
- tb_log_name=tb_log_name,
- eval_log_path=eval_log_path,
- reset_num_timesteps=reset_num_timesteps,
- )
-
- def _excluded_save_params(self) -> List[str]:
- return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
-
- def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
- state_dicts = ["policy", "policy.optimizer"]
-
- return state_dicts, []
diff --git a/spaces/qoobeeshy/yolo-document-layout-analysis/app.py b/spaces/qoobeeshy/yolo-document-layout-analysis/app.py
deleted file mode 100644
index c234ed29a9fbfba42987b9158b1f75cf732c0b83..0000000000000000000000000000000000000000
--- a/spaces/qoobeeshy/yolo-document-layout-analysis/app.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import gradio as gr
-import requests
-import torch
-import os
-from tqdm import tqdm
-# import wandb
-from ultralytics import YOLO
-import cv2
-import numpy as np
-import pandas as pd
-from skimage.transform import resize
-from skimage import img_as_bool
-from skimage.morphology import convex_hull_image
-import json
-
-# wandb.init(mode='disabled')
-
-def tableConvexHull(img, masks):
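-    # Merge all detected table instance masks into one boolean mask by taking
-    # the convex hull of each instance and OR-ing the hulls together.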
-    mask = np.zeros(masks[0].shape, dtype="bool")
-    for msk in masks:
-        temp = msk.cpu().detach().numpy()
-        chull = convex_hull_image(temp)
-        mask = np.bitwise_or(mask, chull)
-    return mask
-
-def cls_exists(clss, cls):
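-    # True if at least one detection in clss has class id cls.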
- indices = torch.where(clss==cls)
- return len(indices[0])>0
-
-def empty_mask(img):
- mask = np.zeros(img.shape[:2], dtype="uint8")
- return np.array(mask, dtype=bool)
-
-def extract_img_mask(img_model, img, config):
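-    # Run the dedicated image model on img and return a dict with 'status' and,
-    # on success, a combined boolean mask of its class-0 (image) detections.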
- res_dict = {
- 'status' : 1
- }
- res = get_predictions(img_model, img, config)
-
- if res['status']==-1:
- res_dict['status'] = -1
-
- elif res['status']==0:
- res_dict['mask']=empty_mask(img)
-
- else:
- masks = res['masks']
- boxes = res['boxes']
- clss = boxes[:, 5]
- mask = extract_mask(img, masks, boxes, clss, 0)
- res_dict['mask'] = mask
- return res_dict
-
-def get_predictions(model, img2, config):
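-    # Run a YOLO prediction with the given config and collect masks and boxes.
-    # 'status' is 1 on success, 0 when nothing is detected, and -1 when the
-    # prediction itself fails (callers retry with retina_masks disabled).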
- res_dict = {
- 'status': 1
- }
- try:
- for result in model.predict(source=img2, verbose=False, retina_masks=config['rm'],\
- imgsz=config['sz'], conf=config['conf'], stream=True,\
- classes=config['classes']):
- try:
- res_dict['masks'] = result.masks.data
- res_dict['boxes'] = result.boxes.data
- del result
- return res_dict
- except Exception as e:
- res_dict['status'] = 0
- return res_dict
- except:
- res_dict['status'] = -1
- return res_dict
-
-def extract_mask(img, masks, boxes, clss, cls):
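-    # OR together every instance mask whose class id equals cls; returns an
-    # all-False mask when that class is absent from the detections.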
- if not cls_exists(clss, cls):
- return empty_mask(img)
- indices = torch.where(clss==cls)
- c_masks = masks[indices]
- mask_arr = torch.any(c_masks, dim=0).bool()
- mask_arr = mask_arr.cpu().detach().numpy()
- mask = mask_arr
- return mask
-
-
-def get_masks(img, model, img_model, flags, configs):
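-    # Run both models over img and return four boolean masks in the order
-    # [paragraph, text, image, table]; 'status' is -1 if any prediction fails.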
- response = {
- 'status': 1
- }
- ans_masks = []
- img2 = img
-
-
-# ***** Getting paragraph and text masks
- res = get_predictions(model, img2, configs['paratext'])
- if res['status']==-1:
- response['status'] = -1
- return response
- elif res['status']==0:
- for i in range(2): ans_masks.append(empty_mask(img))
- else:
- masks, boxes = res['masks'], res['boxes']
- clss = boxes[:, 5]
- for cls in range(2):
- mask = extract_mask(img, masks, boxes, clss, cls)
- ans_masks.append(mask)
-
-
-# ***** Getting image and table masks
- res2 = get_predictions(model, img2, configs['imgtab'])
- if res2['status']==-1:
- response['status'] = -1
- return response
- elif res2['status']==0:
- for i in range(2): ans_masks.append(empty_mask(img))
- else:
- masks, boxes = res2['masks'], res2['boxes']
- clss = boxes[:, 5]
-
- if cls_exists(clss, 2):
- img_res = extract_img_mask(img_model, img, configs['image'])
- if img_res['status'] == 1:
- img_mask = img_res['mask']
- else:
- response['status'] = -1
- return response
-
- else:
- img_mask = empty_mask(img)
- ans_masks.append(img_mask)
-
- if cls_exists(clss, 3):
- indices = torch.where(clss==3)
- tbl_mask = tableConvexHull(img, masks[indices])
- else:
- tbl_mask = empty_mask(img)
- ans_masks.append(tbl_mask)
-
- if not configs['paratext']['rm']:
- h, w, c = img.shape
- for i in range(4):
- ans_masks[i] = img_as_bool(resize(ans_masks[i], (h, w)))
-
-
- response['masks'] = ans_masks
- return response
-
-def overlay(image, mask, color, alpha, resize=None):
- """Combines image and its segmentation mask into a single image.
- https://www.kaggle.com/code/purplejester/showing-samples-with-segmentation-mask-overlay
-
- Params:
- image: Training image. np.ndarray,
- mask: Segmentation mask. np.ndarray,
- color: Color for segmentation mask rendering. tuple[int, int, int] = (255, 0, 0)
- alpha: Segmentation mask's transparency. float = 0.5,
- resize: If provided, both image and its mask are resized before blending them together.
- tuple[int, int] = (1024, 1024))
-
- Returns:
- image_combined: The combined image. np.ndarray
-
- """
- color = color[::-1]
- colored_mask = np.expand_dims(mask, 0).repeat(3, axis=0)
- colored_mask = np.moveaxis(colored_mask, 0, -1)
- masked = np.ma.MaskedArray(image, mask=colored_mask, fill_value=color)
- image_overlay = masked.filled()
-
- if resize is not None:
- image = cv2.resize(image.transpose(1, 2, 0), resize)
- image_overlay = cv2.resize(image_overlay.transpose(1, 2, 0), resize)
-
- image_combined = cv2.addWeighted(image, 1 - alpha, image_overlay, alpha, 0)
-
- return image_combined
-
-
-
-model_path = 'models'
-general_model_name = 'e50_aug.pt'
-image_model_name = 'e100_img.pt'
-
-general_model = YOLO(os.path.join(model_path, general_model_name))
-image_model = YOLO(os.path.join(model_path, image_model_name))
-
-image_path = 'examples'
-sample_name = ['0040da34-25c8-4a5a-a6aa-36733ea3b8eb.png',
- '0050a8ee-382b-447e-9c5b-8506d9507bef.png', '0064d3e2-3ba2-4332-a28f-3a165f2b84b1.png']
-
-sample_path = [os.path.join(image_path, sample) for sample in sample_name]
-
-flags = {
- 'hist': False,
- 'bz': False
-}
-
-
-configs = {}
-configs['paratext'] = {
- 'sz' : 640,
- 'conf': 0.25,
- 'rm': True,
- 'classes': [0, 1]
-}
-configs['imgtab'] = {
- 'sz' : 640,
- 'conf': 0.35,
- 'rm': True,
- 'classes': [2, 3]
-}
-configs['image'] = {
- 'sz' : 640,
- 'conf': 0.35,
- 'rm': True,
- 'classes': [0]
-}
-
-def evaluate(img_path, model=general_model, img_model=image_model,\
- configs=configs, flags=flags):
- # print('starting')
- img = cv2.imread(img_path)
- res = get_masks(img, general_model, image_model, flags, configs)
- if res['status']==-1:
- for idx in configs.keys():
- configs[idx]['rm'] = False
-        return evaluate(img_path, model, img_model, configs, flags)
- else:
- masks = res['masks']
-
- color_map = {
- 0 : (255, 0, 0),
- 1 : (0, 255, 0),
- 2 : (0, 0, 255),
- 3 : (255, 255, 0),
- }
- for i, mask in enumerate(masks):
- img = overlay(image=img, mask=mask, color=color_map[i], alpha=0.4)
- # print('finishing')
- return img
-
-# output = evaluate(img_path=sample_path, model=general_model, img_model=image_model,\
-# configs=configs, flags=flags)
-
-
-inputs_image = [
- gr.components.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
- gr.components.Image(type="numpy", label="Output Image"),
-]
-interface_image = gr.Interface(
- fn=evaluate,
- inputs=inputs_image,
- outputs=outputs_image,
- title="Document Layout Segmentor",
- examples=sample_path,
- cache_examples=True,
-).launch()
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/DAMASMASTER777SERIAL.md b/spaces/quidiaMuxgu/Expedit-SAM/DAMASMASTER777SERIAL.md
deleted file mode 100644
index f22bf3586ef584e31a4640116a63530796da5ca7..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/DAMASMASTER777SERIAL.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Posted on: Sat 20 Jan - 05:41 (2018) Message subject: DAMAS MASTER 777 SERIAL, Reply with quote. The Microchip Technology Inc. 24AA014/24LC014 is a ... 4d29de3e1b
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (neat Video Premiere Pro Cc Crack LINKgolk).md b/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (neat Video Premiere Pro Cc Crack LINKgolk).md
deleted file mode 100644
index cb9a5ef5b2bd68c22db44361fb149dac1f833495..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (neat Video Premiere Pro Cc Crack LINKgolk).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-HD Online Player (neat video premiere pro cc crackgolk)
-
-HD. Online Player (neat video premiere pro cc crackgolk). Premiere Elements (SR plug-in), Adobe Media Encoder ... Apple is currently busy trying to make FCP ... 1fdad05405
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Just Cause 3 Crack Pc Game Highly Compressed Free Download.md b/spaces/quidiaMuxgu/Expedit-SAM/Just Cause 3 Crack Pc Game Highly Compressed Free Download.md
deleted file mode 100644
index cf1756b1f2e12156487840c0a17b520b86ec2804..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Just Cause 3 Crack Pc Game Highly Compressed Free Download.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-Just Cause 3 Crack Pc Game Highly Compressed Free Download
-Have you ever wanted to use one computer with multiple monitors, keyboards and mice? Have you ever wished to share your PC with your family, friends or colleagues without compromising your privacy and performance? Have you ever dreamed of playing games or watching movies on a big screen with surround sound? If you answered yes to any of these questions, then you need Aster V7 6x64 key.
-
-Introduction
-
-In this article, we will explain what Aster V7 is, what Aster V7 6x64 key is, why you need it, how to use it and what benefits it can bring to you. By the end of this article, you will be able to turn your single PC into a multi-user station with ease and convenience.
-Aster V7 is software that allows you to create multiple workstations from one PC. It enables you to connect multiple monitors, keyboards and mice to your PC and assign them to different users. Each user can work independently on their own desktop, run their own applications and access their own files. Aster V7 supports Windows 7/8/10 (32-bit and 64-bit) operating systems.
-
-What is Aster V7 6x64 key?
-
-Aster V7 6x64 key is an activation code that you need to use Aster V7 on a 64-bit Windows system. It is a unique combination of letters and numbers that verifies your purchase and unlocks all the features of Aster V7. You can buy Aster V7 6x64 key from the official website of ASTER or from other authorized sellers.
-
-Why do you need Aster V7 6x64 key?
-
-You need Aster V7 6x64 key if you want to use Aster V7 on a 64-bit Windows system. Without the key, you can only use Aster V7 in trial mode for 30 days. In trial mode, you can only create two workstations and some functions are limited. With the key, you can create up to six workstations (depending on your hardware configuration) and enjoy all the functions of Aster V7.
-
-How to use Aster V7 6x64 key?
-
-Using Aster V7 6x64 key is very easy. You just need to follow these simple steps:
-
-aster v7 6x64 activation code
-aster v7 6x64 crack download
-aster v7 6x64 license key generator
-aster v7 6x64 serial number
-aster v7 6x64 full version free
-aster v7 6x64 multiseat software
-aster v7 6x64 windows 10
-aster v7 6x64 keygen
-aster v7 6x64 patch
-aster v7 6x64 registration key
-aster v7 6x64 coupon code
-aster v7 6x64 discount offer
-aster v7 6x64 review
-aster v7 6x64 tutorial
-aster v7 6x64 setup guide
-aster v7 6x64 system requirements
-aster v7 6x64 alternative
-aster v7 6x64 vs softxpand
-aster v7 6x64 vs ibik workstation
-aster v7 6x64 vs userful desktop
-aster v7 6x64 vs betwin es
-aster v7 6x64 vs friendly seats
-aster v7 6x64 vs multiseat gaming
-aster v7 6x64 vs miniframe softxpand duo pro
-aster v7 6x64 vs ncomputing l300
-aster v7 6x64 vs thinsoft winconnect server xp
-aster v7 6x64 vs microsoft multipoint server
-aster v7 6x64 vs vmware horizon view client
-aster v7 6x64 vs citrix xendesktop
-aster v7 6x64 vs be twn pc duo s2h2
-aster v7 6x64 vs userful multiseat linux
-aster v7 6x64 vs ibik terminal server pro
-aster v7 6x64 vs elusiva terminal server pro
-aster v7 6x64 vs thinstation live cd/usb
-aster v7 6x64 vs linux mint mate edition
-aster v7 6x64 vs ubuntu mate edition
-aster v7 6x64 vs zorin os core edition
-aster v7 6x64 vs elementary os loki edition
-aster v7 6x64 vs linux lite edition
-aster v7 6x64 vs peppermint os edition
-aster v7 6x64 vs lubuntu edition
-aster v7 6x64 vs xubuntu edition
-aster v7 6x64 vs kubuntu edition
-aster v7 6x64 vs linuxfx edition
-aster v7 6x64 vs deepin linux edition
-aster v7 6x64 vs manjaro linux edition
-aster v7 6x64 vs solus linux edition
-aster v7 6x64 vs mx linux edition
-aster v7 6x64 vs pop os edition
-aster v7 6x64 vs fedora workstation edition
-
Download and install Aster V7
-
First, you need to download the latest version of Aster V7 from the official website of ASTER or from other authorized sources. Then, you need to install it on your PC by following the instructions on the screen. You may need to restart your PC after the installation.
-
Activate Aster V7 with the key
-
Second, you need to activate Aster V7 with the key that you have purchased. To do this, you need to open the ASTER Control Panel and click on the "Enter Key" button. Then, you need to enter your name, email address and the key in the corresponding fields. After that, click on the "Activate" button and wait for a few seconds until the activation is completed.
-
Configure Aster V7 settings
-
Third, you need to configure Aster V7 settings according to your preferences and needs. You can do this by using the ASTER Control Panel or by right-clicking on the ASTER icon in the system tray. You can adjust various parameters such as display resolution, color depth, sound volume, keyboard layout, mouse speed and more. You can also assign different wallpapers, screensavers and themes for each workstation.
-
Connect multiple monitors, keyboards and mice
-
Fourth, you need to connect multiple monitors, keyboards and mice to your PC using HDMI, VGA, DVI or USB ports. You can use any combination of devices that suits your situation. For example, you can use one monitor with two keyboards and two mice for two users; or two monitors with one keyboard and one mouse for one user; or three monitors with three keyboards and three mice for three users; etc.
-
Benefits of using Aster V7 6x64 key
-
Using Aster V7 6x64 key can bring many benefits to you. Here are some of them:
-
Save money and space
-
With Aster V7 6x64 key, you don't need to buy multiple PCs for multiple users. You can save money on hardware costs, electricity bills and maintenance fees. You also don't need to occupy much space for multiple PCs. You can save space on your desk or in your room.
-
Increase productivity and collaboration
-
With Aster V7 6x64 key, you can increase your productivity and collaboration with other users. You can work on different tasks simultaneously without interfering with each other. You can also share files, folders and printers easily among different workstations. You can communicate with other users via chat or voice call using built-in or external microphones and speakers.
-
Enjoy gaming and entertainment
-
With Aster V7 6x64 key, you can enjoy gaming and entertainment with other users. You can play multiplayer games online or offline using different controllers such as keyboards, mice, joysticks or gamepads. You can also watch movies or videos on a big screen with surround sound using different media players such as VLC or Windows Media Player.
-
Conclusion
-
In conclusion, Aster V7 6x64 key is a great software that allows you to create multiple workstations from one PC. It enables you to connect multiple monitors, keyboards and mice to your PC and assign them to different users. Each user can work independently on their own desktop, run their own applications and access their own files. Using Aster V7 6x64 key can save money and space; increase productivity and collaboration; enjoy gaming and entertainment.
-
Summary of the main points
-
-
Aster V7 is a software that allows you to create multiple workstations from one PC.
-
Aster V7 6x64 key is an activation code that you need to use Aster V7 on a 64-bit Windows system.
-
You need Aster V7 6x64 key if you want to use Aster V7 without limitations.
-
You can use Aster V7 6x64 key by downloading and installing Aster V7; activating it with the key; configuring its settings; connecting multiple monitors, keyboards and mice.
-
You can benefit from using Aster V7 6x64 key by saving money and space; increasing productivity and collaboration; enjoying gaming and entertainment.
-
-
Call to action
-
If you are interested in using Aster V7 6x64 key, don't hesitate any longer. Visit the official website of ASTER today and get your own copy of this amazing software. You will not regret it!
-
Frequently Asked Questions
-
-
How many workstations can I create with Aster V7 6x64 key?
-
The number of workstations that you can create with Aster V7 6x64 key depends on your hardware configuration. The maximum number is six workstations for one PC.
-
Can I use different operating systems for different workstations?
-No, you cannot; every workstation runs on the same operating system that is installed on your PC. However, you can use different versions of Windows such as Windows 7, 8 or 10.
-
Can I use Aster V7 6x64 key on multiple PCs?
-
No, you cannot use Aster V7 6x64 key on multiple PCs. The key is valid for one PC only. If you want to use Aster V7 on another PC, you need to buy another key.
-
Is Aster V7 6x64 key safe and legal?
-
Yes, Aster V7 6x64 key is safe and legal. It is a genuine product that is developed and distributed by ASTER, a reputable company that has been in the market for over 10 years. It does not contain any viruses, malware or spyware. It does not violate any laws or regulations.
-
What if I have any problems or questions about Aster V7 6x64 key?
-
If you have any problems or questions about Aster V7 6x64 key, you can contact the customer support team of ASTER via email or phone. They will be happy to assist you and solve your issues. You can also visit the FAQ section or the forum of ASTER for more information and tips.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md b/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md
deleted file mode 100644
index 1b76cb4d70d5fb609d4a357fd894234970f65ff6..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
Cash Cash: How The Beat Goes On Became Their Breakthrough Album
-
Cash Cash is an electronic music group from New Jersey, USA, that consists of brothers Jean Paul Makhlouf and Alex Makhlouf, and Samuel Frisch. They are known for their catchy dance-pop songs, energetic live shows, and remixes for artists like Katy Perry, Bruno Mars, and Kelly Clarkson.
-
But before they became international stars, they had to overcome many challenges and setbacks in their musical journey. One of their most pivotal moments was the release of their third studio album, The Beat Goes On, in 2012.
The Beat Goes On was a self-produced and self-released album that showcased Cash Cash's versatility and creativity. It featured 11 tracks that ranged from electro-house to dubstep to pop-rock, with influences from Michael Jackson, Daft Punk, and The Beatles.
-
The album was only licensed in Japan as a full-length LP, but it was also available worldwide as a 6-track EP on Cash Cash Music. The album spawned two singles: "Michael Jackson (The Beat Goes On)" and "I Like It Loud", which were both released by Dutch label Spinnin' Records.
-
"Michael Jackson (The Beat Goes On)" was a tribute to the King of Pop, with samples of his iconic vocals and melodies. The song became a hit in Japan and the Netherlands, reaching the top 10 on the Dance Top 30 chart and the top 30 on the Dutch Top 40 chart.
-
"I Like It Loud" was a party anthem that featured Cash Cash's signature vocoder vocals and synth hooks. The song was also used in several commercials and TV shows, such as MTV's Jersey Shore and ABC's Dancing with the Stars.
-
The Beat Goes On was a critical and commercial success for Cash Cash, earning them recognition and respect in the electronic music scene. It also opened up new opportunities for them to collaborate with other artists and labels, such as Big Beat Records and Atlantic Records.
-
The Beat Goes On was the album that proved that Cash Cash had what it takes to make it big in the music industry. It was the album that made them stand out from the crowd and showed their passion and talent for making music. It was the album that made the beat go on for Cash Cash.
-
-
The Beat Goes On also received positive reviews from critics and fans, who praised Cash Cash's production skills, songwriting abilities, and musical diversity. Some of the highlights of the album include "Still Got It", a funky electro-pop song with a catchy chorus and guitar riffs; "Mama Told Me", a retro-inspired synth-pop song with a nostalgic vibe and a guest appearance by J.Trill; "History", a melodic and emotional ballad with piano and strings; and "One Last Song", a powerful and uplifting finale with a rock edge.
-
-
The album also showcased Cash Cash's talent for remixing their own songs, as they included extended versions of "Michael Jackson (The Beat Goes On)" and "I Like It Loud" on the EP edition. The remixes added more energy and dynamics to the original tracks, making them suitable for clubs and festivals.
-
The Beat Goes On was a milestone for Cash Cash, as it marked their transition from a pop-rock band to an electronic music group. It also demonstrated their ability to adapt to the changing trends and tastes of the music industry, while staying true to their own vision and style. The album was a testament to their hard work, dedication, and passion for making music.
7b8c122e87
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md b/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md
deleted file mode 100644
index aad755c8ca31b7d1420f87cde31bddd4654f16eb..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md
+++ /dev/null
@@ -1,156 +0,0 @@
-
-
Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version
-
If you are looking for a powerful and easy-to-use anti-malware software that can protect your PC from various threats, you may have heard of Gridinsoft Anti-Malware. But what if you want to enjoy the full features of this software without paying for a license? That's where Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version comes in handy.
-
Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version
In this article, we will explain what Gridinsoft Anti-Malware is, what Crackingpatching is, why you need Gridinsoft Anti-Malware 4.0.5 Patch, and how to install and use it. We will also discuss the pros and cons of using this patch, and give you some tips and warnings before you decide to use it.
-
What is Gridinsoft Anti-Malware?
-
Gridinsoft Anti-Malware is a software that can detect and remove various types of malware from your PC, such as viruses, trojans, spyware, adware, ransomware, rootkits, worms, keyloggers, etc. It can also clean up your browser from annoying ads, pop-ups, redirects, and other unwanted elements.
-
Gridinsoft Anti-Malware has a powerful scanning and detection engine that can find even the most hidden and stubborn malware on your system. It also has a user-friendly and customizable interface that allows you to adjust the scan modes, settings, and preferences according to your needs.
-
Gridinsoft Anti-Malware offers a free trial version that can scan your PC and remove some malware, but it has some limitations in terms of functionality and duration. To unlock the full features of this software, you need to purchase a license that costs $29.95 for one month or $39.95 for one year.
-
What is Crackingpatching?
-
Crackingpatching is a website that provides patches, cracks, keygens, serial keys, activators, loaders, and other tools that can modify or bypass the license verification or activation process of various software products.
-
Crackingpatching claims that its patches are tested and working on different versions of Windows operating systems, and that they are safe and virus-free. However, there is no guarantee that these patches are legitimate or legal.
-
How to download Gridinsoft Anti-Malware 4.0.5 with crack
-Gridinsoft Anti-Malware 4.0.5 license key generator
-Gridinsoft Anti-Malware 4.0.5 full version free download
-Gridinsoft Anti-Malware 4.0.5 patch by crackingpatching.com
-Gridinsoft Anti-Malware 4.0.5 review and features
-Gridinsoft Anti-Malware 4.0.5 activation code
-Gridinsoft Anti-Malware 4.0.5 serial key
-Gridinsoft Anti-Malware 4.0.5 cracked version download
-Gridinsoft Anti-Malware 4.0.5 latest update
-Gridinsoft Anti-Malware 4.0.5 system requirements
-Gridinsoft Anti-Malware 4.0.5 installation guide
-Gridinsoft Anti-Malware 4.0.5 comparison with other anti-malware software
-Gridinsoft Anti-Malware 4.0.5 coupon code and discount
-Gridinsoft Anti-Malware 4.0.5 trial version download
-Gridinsoft Anti-Malware 4.0.5 pros and cons
-Gridinsoft Anti-Malware 4.0.5 customer support and feedback
-Gridinsoft Anti-Malware 4.0.5 malware removal tool
-Gridinsoft Anti-Malware 4.0.5 best alternative
-Gridinsoft Anti-Malware 4.0.5 for Windows 10/8/7
-Gridinsoft Anti-Malware 4.0.5 for Mac OS X
-Gridinsoft Anti-Malware 4.0.5 for Linux
-Gridinsoft Anti-Malware 4.0.5 for Android
-Gridinsoft Anti-Malware 4.0.5 for iOS
-Gridinsoft Anti-Malware 4.0.5 for Chromebook
-Gridinsoft Anti-Malware 4.0.5 online scan
-Gridinsoft Anti-Malware 4.0.5 offline installer
-Gridinsoft Anti-Malware 4.0.5 portable version
-Gridinsoft Anti-Malware 4.0.5 lifetime license
-Gridinsoft Anti-Malware 4.0.5 refund policy
-Gridinsoft Anti-Malware 4.0.5 testimonials and ratings
-Gridinsoft Anti-Malware 4.0.5 video tutorial and demo
-Gridinsoft Anti-Malware 4.0.5 changelog and release notes
-Gridinsoft Anti-Malware 4.0.5 FAQs and tips
-Gridinsoft Anti-Malware 4.0.5 blog and news
-Gridinsoft Anti-Malware 4.0.5 forum and community
-Gridinsoft Anti-Malware 4.0.5 affiliate program and earnings
-Gridinsoft Anti-Malware 4.0.5 giveaway and contest
-Gridinsoft Anti-Malware 4
-
Crackingpatching also warns its users that using its patches may violate the license agreement or terms of service of the original software developers or vendors, and that they are solely responsible for any consequences or damages that may arise from using them.
-
Why do you need Gridinsoft Anti-Malware 4.0.5 Patch?
-
If you want to use Gridinsoft Anti-Malware without paying for a license or without any restrictions or limitations, you may need Gridinsoft Anti-Malware 4.0.5 Patch.
-
Gridinsoft Anti-Malware 4.0.5 Patch is a tool that can crack or activate the full version of Gridinsoft Anti-Malware 4.0.5 on your PC.
-
With this patch, you can enjoy all the features of Gridinsoft Anti-Malware 4.0.5 without any expiration date or trial period.
-
You can also update the software regularly without losing the activation status.
-
Features of Gridinsoft Anti-Malware 4.0.5 Patch
-
Gridinsoft Anti-Malware 4.0.5 Patch has some features that make it attractive for users who want to use Gridinsoft Anti-Malware for free.
-
Powerful scanning and detection engine
-
The patch does not affect the performance or quality of the scanning and detection engine of Gridinsoft Anti-Malware.
-
You can still scan your PC with different modes (standard scan, quick scan, full scan, removable scan) and find all kinds of malware on your system.
-
User-friendly and customizable interface
-
The patch does not change the appearance or functionality of the interface of Gridinsoft Anti-Malware.
-
You can still access all the options and settings of the software from the main window or the menu bar.
-
You can also customize the interface according to your preferences by changing the language, theme, font size, sound effects, etc.
-
Flexible and versatile settings
-
The patch does not limit or disable any settings or features of Gridinsoft Anti-Malware.
-
You can still adjust the scan parameters (such as file types, file size, heuristic rules), quarantine options (such as restore, delete), protection options (such as real-time protection), update options (such as automatic update), etc.
-
Comprehensive and detailed reports
-
The patch does not interfere with the generation or display of reports by Gridinsoft Anti-Malware.
-
You can still view the scan results (such as detected items, scan duration), quarantine history (such as quarantined items, restored items), log files (such as scan logs, update logs), etc.
-
How to install and use Gridinsoft Anti-Malware 4.0.5 Patch?
-
If you want to use Gridinsoft Anti-Malware 4.0.5 Patch, you need to follow these steps:
-
Download the patch from Crackingpatching website
-
The first step is to download the patch from Crackingpatching website. You can find it by searching for "Gridinsoft Anti-Malware 4.0.5 Patch" on their homepage or using this link:
The patch file is named "GridinSoft.AntiMalware.v4.x.Patch.zip" and has a size of about 1 MB. You need to extract it using a program like WinRAR or 7-Zip.
-
Install Gridinsoft Anti-Malware 4.0.5 on your PC
-
The next step is to install Gridinsoft Anti-Malware 4.0.5 on your PC. You can download it from their official website or using this link:
The installation file is named "gsam-405-setup.exe" and has a size of about 80 MB. You need to run it as administrator and follow the instructions on the screen. You can choose the installation folder, language, and shortcuts according to your preferences. You don't need to enter any license key or activate the software at this point.
-
Run the patch as administrator and apply it
-
-The final step is to run the patch as administrator and apply it to crack or activate the full version of Gridinsoft Anti-Malware 4.0.5. You need to locate the patch file that you extracted earlier and run it as administrator. You will see a window like this:
-
-
You need to click on the "Patch" button and select the installation folder of Gridinsoft Anti-Malware 4.0.5 on your PC. The default folder is "C:\Program Files\GridinSoft Anti-Malware". You will see a message like this:
-
-
You need to click on "OK" and wait for the patching process to finish. You will see another message like this:
-
-
You need to click on "OK" again and close the patch window. You have successfully applied the patch to Gridinsoft Anti-Malware 4.0.5.
-
Enjoy the full version of Gridinsoft Anti-Malware
-
The last step is to enjoy the full version of Gridinsoft Anti-Malware 4.0.5 on your PC. You can launch the software from the desktop shortcut or the start menu and scan your PC for malware.
-
You will notice that the software is activated and does not ask for any license key or activation code. You will also see that all the features are unlocked and available for use.
-
You can update the software regularly without losing the activation status. You can also use the software without any expiration date or trial period.
-
Pros and cons of Gridinsoft Anti-Malware 4.0.5 Patch
-
Gridinsoft Anti-Malware 4.0.5 Patch has some pros and cons that you should consider before using it.
-
Pros
-
-
Effective and reliable malware removal: The patch does not compromise the quality or performance of Gridinsoft Anti-Malware's scanning and detection engine. You can still remove various types of malware from your PC with ease and confidence.
-
Fast and easy to use: The patch is simple and straightforward to use. You just need to download, extract, run, and apply it in a few minutes. You don't need any technical skills or knowledge to use it.
-
Free to download and use: The patch is free to download and use from Crackingpatching website. You don't need to pay for a license or subscription to use Gridinsoft Anti-Malware 4.0.5.
-
-
Cons
-
-
May cause false positives or compatibility issues: The patch may alter some files or settings of Gridinsoft Anti-Malware that may cause false positives or compatibility issues with other software or hardware on your PC. You may need to whitelist or exclude some items from scanning or protection to avoid these problems.
-
May violate the license agreement of Gridinsoft Anti-Malware: The patch may violate the license agreement or terms of service of Gridinsoft Anti-Malware that you agreed to when you installed the software on your PC. You may lose your right to use the software legally or get technical support from the developers or vendors.
-
May expose your PC to security risks or legal troubles: The patch may expose your PC to security risks or legal troubles by downloading or using it from an untrusted source or by modifying or bypassing the license verification or activation process of Gridinsoft Anti-Malware. You may get infected with malware, hacked, sued, fined, or arrested for using it.
-
-
Conclusion
-
In conclusion, Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version is a tool that can crack or activate the full version of Gridinsoft Anti-Malware 4.0.5 on your PC for free.
-
It has some features that make it attractive for users who want to use Gridinsoft Anti-Malware without paying for a license or without any restrictions or limitations.
-
However, it also has some drawbacks that make it risky or illegal for users who want to use Gridinsoft Anti-Malware safely and legally.
-
Therefore, you should weigh the pros and cons of using this patch carefully before you decide to use it.
-
If you want to use Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version, you can follow these steps:
-
-
Download the patch from Crackingpatching website.
-
Install Gridinsoft Anti-Malware 4.0.5 on your PC.
-
Run the patch as administrator and apply it.
-
Enjoy the full version of Gridinsoft Anti-Malware.
-
-
If you want to use Gridinsoft Anti-Malware legally and safely, you can follow these steps:
-
-
Purchase a license from Gridinsoft website.
-
Activate the software with your license key.
-
Enjoy the full features of Gridinsoft Anti-Malware.
-
-
Frequently Asked Questions
-
Here are some frequently asked questions about Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version:
-
Is Gridinsoft Anti-Malware 4.0.5 Patch safe?
-
The safety of Gridinsoft Anti-Malware 4.0.5 Patch depends on where you download it from and how you use it.
-
If you download it from Crackingpatching website, which claims that its patches are tested and virus-free, you may not get infected with malware by downloading or using it.
-
However, if you download it from other sources that may be malicious or unreliable, you may get infected with malware by downloading or using it.
-
If you use it properly and carefully, following the instructions and warnings provided by Crackingpatching website, you may not encounter any problems or issues by using it.
-
However, if you use it improperly or carelessly, ignoring the instructions and warnings provided by Crackingpatching website, you may encounter some problems or issues by using it.
-
Is Gridinsoft Anti-Malware 4.0.5 Patch legal?
-
The legality of Gridinsoft Anti-Malware 4.0.5 Patch depends on where you live and how you use it.
-
If you live in a country or region that does not have strict laws or regulations regarding software piracy or intellectual property rights, you may not face any legal troubles by using it.
-
However, if you live in a country or region that has strict laws or regulations regarding software piracy or intellectual property rights, you may face some legal troubles by using it.
-
If you use it for personal or educational purposes only, without distributing or selling it to others, you may not face any legal troubles by using it.
-
-However, if you use it for commercial or illegal purposes, such as distributing or selling it to others, you may face some legal troubles by using it.
Does Gridinsoft Anti-Malware 4.0.5 Patch work?
-
The effectiveness of Gridinsoft Anti-Malware 4.0.5 Patch depends on the version and compatibility of Gridinsoft Anti-Malware and the patch.
-
If you use the patch for Gridinsoft Anti-Malware 4.0.5, which is the latest version as of writing this article, you may not have any problems or issues by using it.
-
However, if you use the patch for other versions of Gridinsoft Anti-Malware, which may be outdated or incompatible, you may have some problems or issues by using it.
-
Where can I get Gridinsoft Anti-Malware 4.0.5 Patch?
-
You can get Gridinsoft Anti-Malware 4.0.5 Patch from Crackingpatching website, which is the source of this article.
-
You can find it by searching for "Gridinsoft Anti-Malware 4.0.5 Patch" on their homepage or using this link:
You can also get Gridinsoft Anti-Malware 4.0.5 Patch from other sources that may provide similar patches, but you should be careful and cautious about their reliability and safety.
-
What are the alternatives to Gridinsoft Anti-Malware 4.0.5 Patch?
-
If you don't want to use Gridinsoft Anti-Malware 4.0.5 Patch for any reason, you have some alternatives to choose from.
-
One alternative is to purchase a license from Gridinsoft website and activate the software legally and safely.
-
Another alternative is to use other anti-malware software that may offer similar or better features and performance than Gridinsoft Anti-Malware.
-
Some examples of other anti-malware software are Malwarebytes, SpyHunter, Emsisoft, HitmanPro, etc.
-
ed
0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py
deleted file mode 100644
index 3e356472fb5a27f370cb3920976a11d12a76c1b7..0000000000000000000000000000000000000000
--- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-## Model parameters
-model_hidden_size = 256
-model_embedding_size = 256
-model_num_layers = 3
-
-
-## Training parameters
-learning_rate_init = 1e-4
-speakers_per_batch = 64
-utterances_per_speaker = 10
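As a rough illustration of how hyperparameters like these are usually consumed, the sketch below wires them into a small LSTM speaker encoder. This is an assumption for illustration only, not the repository's actual model code, and the 40-channel mel input size is likewise assumed.

```python
# Hypothetical sketch only: an LSTM speaker encoder shaped by the parameters above.
# The mel_n_channels value (40) and the class itself are assumptions, not repo code.
import torch
from torch import nn

model_hidden_size = 256
model_embedding_size = 256
model_num_layers = 3


class SpeakerEncoderSketch(nn.Module):
    def __init__(self, mel_n_channels: int = 40):
        super().__init__()
        # Stacked LSTM over mel-spectrogram frames, then a projection to the embedding size.
        self.lstm = nn.LSTM(mel_n_channels, model_hidden_size,
                            num_layers=model_num_layers, batch_first=True)
        self.linear = nn.Linear(model_hidden_size, model_embedding_size)

    def forward(self, mels: torch.Tensor) -> torch.Tensor:
        # mels: (batch, frames, mel_n_channels); keep the last layer's final hidden state.
        _, (hidden, _) = self.lstm(mels)
        embeds = torch.relu(self.linear(hidden[-1]))
        # L2-normalise so embeddings can be compared with cosine similarity.
        return embeds / (embeds.norm(dim=1, keepdim=True) + 1e-5)


if __name__ == "__main__":
    print(SpeakerEncoderSketch()(torch.randn(4, 160, 40)).shape)  # torch.Size([4, 256])
```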
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md
deleted file mode 100644
index 21b9ac4e6ac69398d2507c1e1b9088025ce8dc3e..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md b/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md
deleted file mode 100644
index 8a0f6f899a3129a27f2885fca8fbafa2411b5bf1..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md b/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md
deleted file mode 100644
index eb9c7b405353e28d200f3e92a4b60c6dc862bf7a..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
We have gathered a huge collection of Kaapa Song Ringtones (Pagalworld, Mobcup), Kaapa Theme Music, Kaapa Instrumental Ringtones, Kaapa OST, and Kaapa Movie Bgm in Mp3, and much more, for download free of cost.
- The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please read the license
-
Biases and content acknowledgment
- Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the model card
-
- """
- )
-
-image_blocks.launch()
\ No newline at end of file
diff --git a/spaces/russellc/comparing-captioning-models/app.py b/spaces/russellc/comparing-captioning-models/app.py
deleted file mode 100644
index 5c23349e0471b091c177fe373d87079b082a62c2..0000000000000000000000000000000000000000
--- a/spaces/russellc/comparing-captioning-models/app.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import gradio as gr
-from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel
-import torch
-
-torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
-torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
-torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
-
-git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
-git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
-
-git_processor_large = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-git_model_large = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
-
-blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-
-blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
-
-vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-git_model_base.to(device)
-blip_model_base.to(device)
-git_model_large.to(device)
-blip_model_large.to(device)
-vitgpt_model.to(device)
-
-def generate_caption(processor, model, image, tokenizer=None):
- inputs = processor(images=image, return_tensors="pt").to(device)
-
- generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
-
- if tokenizer is not None:
- generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
- else:
- generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
- return generated_caption
-
-
-def generate_captions(image):
- caption_git_base = generate_caption(git_processor_base, git_model_base, image)
-
- caption_git_large = generate_caption(git_processor_large, git_model_large, image)
-
- caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
-
- caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
-
- caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
-
- return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt
-
-
-examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2")]
-
-title = "Interactive demo: comparing image captioning models"
-description = "Gradio Demo to compare GIT, BLIP and ViT+GPT2, 3 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
-article = "
"
-
-interface = gr.Interface(fn=generate_captions,
- inputs=gr.inputs.Image(type="pil"),
- outputs=outputs,
- examples=examples,
- title=title,
- description=description,
- article=article,
- enable_queue=True)
-interface.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py b/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py
deleted file mode 100644
index 87ef7002e12656496654def9a1f112ca63cdd4a7..0000000000000000000000000000000000000000
--- a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py
+++ /dev/null
@@ -1,400 +0,0 @@
-import os
-import platform
-import random
-import time
-from dataclasses import asdict, dataclass
-from pathlib import Path
-
-import gradio as gr
-import psutil
-from about_time import about_time
-from ctransformers import AutoModelForCausalLM
-from dl_hf_model import dl_hf_model
-from loguru import logger
-
-
-URL = "https://huggingface.co/s3nh/WizardVicuna-Uncensored-3B-0719-GGML/resolve/main/WizardVicuna-Uncensored-3B-0719.ggmlv3.q4_1.bin" # 4.05G
-
-_ = (
- "golay" in platform.node()
- or "okteto" in platform.node()
- or Path("/kaggle").exists()
- # or psutil.cpu_count(logical=False) < 4
- or 1 # run 7b in hf
-)
-
-if _:
- url = "https://huggingface.co/s3nh/WizardVicuna-Uncensored-3B-0719-GGML/resolve/main/WizardVicuna-Uncensored-3B-0719.ggmlv3.q4_1.bin" # 2.87G
-
-
-prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
-### Instruction: {user_prompt}
-### Response:
-"""
-
-prompt_template = """System: You are a helpful,
-respectful and honest assistant. Always answer as
-helpfully as possible, while being safe. Your answers
-should not include any harmful, unethical, racist,
-sexist, toxic, dangerous, or illegal content. Please
-ensure that your responses are socially unbiased and
-positive in nature. If a question does not make any
-sense, or is not factually coherent, explain why instead
-of answering something not correct. If you don't know
-the answer to a question, please don't share false
-information.
-User: {prompt}
-Assistant: """
-
-prompt_template = """System: You are a helpful assistant.
-User: {prompt}
-Assistant: """
-
-prompt_template = """Question: {question}
-Answer: Let's work this out in a step by step way to be sure we have the right answer."""
-
-prompt_template = """[INST] <>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
-<>
-What NFL team won the Super Bowl in the year Justin Bieber was born?
-[/INST]"""
-
-prompt_template = """[INST] <>
-You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <>
-{question} [/INST]
-"""
-
-prompt_template = """[INST] <>
-You are a helpful assistant.
-<>
-{question} [/INST]
-"""
-
-prompt_template = """### HUMAN:
-{question}
-### RESPONSE:"""
-
-
-prompt_template = """<|prompt|>:{question}
-<|answer|>:"""
-
-
-prompt_template = """
-HUMAN: {question}
-ASSISTANT:"""
-
-
-_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
-stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
-
-logger.debug(f"{stop_string=} not used")
-
-_ = psutil.cpu_count(logical=False) - 1
-cpu_count: int = int(_) if _ else 1
-logger.debug(f"{cpu_count=}")
-
-LLM = None
-
-try:
- model_loc, file_size = dl_hf_model(url)
-except Exception as exc_:
- logger.error(exc_)
- raise SystemExit(1) from exc_
-
-LLM = AutoModelForCausalLM.from_pretrained(
- model_loc,
- model_type="llama",
-)
-
-logger.info(f"done load llm {model_loc=} {file_size=}G")
-
-os.environ["TZ"] = "Asia/Shanghai"
-try:
-    time.tzset()
-except Exception:
-    logger.warning("Windows, cant run time.tzset()")
-
-
-@dataclass
-class GenerationConfig:
- temperature: float = 0.7
- top_k: int = 50
- top_p: float = 0.9
- repetition_penalty: float = 1.0
- max_new_tokens: int = 512
- seed: int = 42
- reset: bool = False
- stream: bool = True
- # threads: int = cpu_count
- # stop: list[str] = field(default_factory=lambda: [stop_string])
-
-
-def generate(
- question: str,
- llm=LLM,
- config: GenerationConfig = GenerationConfig(),
-):
- """Run model inference, will return a Generator if streaming is true."""
-
-
- prompt = prompt_template.format(question=question)
-
- return llm(
- prompt,
- **asdict(config),
- )
-
-
-logger.debug(f"{asdict(GenerationConfig())=}")
-
-
-def user(user_message, history):
- history.append([user_message, None])
- return user_message, history
-
-
-def user1(user_message, history):
- history.append([user_message, None])
- return "", history
-
-def bot_(history):
- user_message = history[-1][0]
- resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
- bot_message = user_message + ": " + resp
- history[-1][1] = ""
- for character in bot_message:
- history[-1][1] += character
- time.sleep(0.02)
- yield history
-
- history[-1][1] = resp
- yield history
-
-
-def bot(history):
- user_message = history[-1][0]
- response = []
-
- logger.debug(f"{user_message=}")
-
- with about_time() as atime:
- flag = 1
- prefix = ""
- then = time.time()
-
- logger.debug("about to generate")
-
- config = GenerationConfig(reset=True)
- for elm in generate(user_message, config=config):
- if flag == 1:
- logger.debug("in the loop")
- prefix = f"({time.time() - then:.2f}s) "
- flag = 0
- print(prefix, end="", flush=True)
- logger.debug(f"{prefix=}")
- print(elm, end="", flush=True)
-
- response.append(elm)
- history[-1][1] = prefix + "".join(response)
- yield history
-
- _ = (
- f"(time elapsed: {atime.duration_human}, "
- f"{atime.duration/len(''.join(response)):.2f}s/char)"
- )
-
- history[-1][1] = "".join(response) + f"\n{_}"
- yield history
-
-
-def predict_api(prompt):
- logger.debug(f"{prompt=}")
- try:
- # user_prompt = prompt
- config = GenerationConfig(
- temperature=0.2,
- top_k=10,
- top_p=0.9,
- repetition_penalty=1.0,
- max_new_tokens=512, # adjust as needed
- seed=42,
- reset=True,
- stream=False,
- )
-
- response = generate(
- prompt,
- config=config,
- )
-
- logger.debug(f"api: {response=}")
- except Exception as exc:
- logger.error(exc)
- response = f"{exc=}"
- return response
-
-
-css = """
- .importantButton {
- background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
- border: none !important;
- }
- .importantButton:hover {
- background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
- border: none !important;
- }
- .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
- .xsmall {font-size: x-small;}
-"""
-etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
-examples_list = [
- ["Send an email requesting that people use language models responsibly."],
- ["Write a shouting match between Julius Caesar and Napoleon"],
- ["Write a theory to explain why cat never existed"],
- ["write a story about a grain of sand as it watches millions of years go by"],
- ["What are 3 popular chess openings?"],
- ["write a conversation between the sun and pluto"],
- ["Did you know that Yann LeCun dropped a rap album last year? We listened to it andhere’s what we thought:"],
-]
-
-logger.info("start block")
-
-with gr.Blocks(
- title=f"{Path(model_loc).name}",
- theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
- css=css,
-) as block:
- # buff_var = gr.State("")
- with gr.Accordion("🎈 Info", open=False):
- # gr.HTML(
- # """
and spin a CPU UPGRADE to avoid the queue
"""
- # )
- gr.Markdown(
- f"""
{Path(model_loc).name}
- Most examples are meant for another model.
- You probably should try to test
- some related prompts.""",
- elem_classes="xsmall",
- )
-
- # chatbot = gr.Chatbot().style(height=700) # 500
- chatbot = gr.Chatbot(height=500)
-
- # buff = gr.Textbox(show_label=False, visible=True)
-
- with gr.Row():
- with gr.Column(scale=5):
- msg = gr.Textbox(
- label="Chat Message Box",
- placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
- show_label=False,
- # container=False,
- lines=6,
- max_lines=30,
- show_copy_button=True,
- # ).style(container=False)
- )
- with gr.Column(scale=1, min_width=50):
- with gr.Row():
- submit = gr.Button("Submit", elem_classes="xsmall")
- stop = gr.Button("Stop", visible=True)
- clear = gr.Button("Clear History", visible=True)
- with gr.Row(visible=False):
- with gr.Accordion("Advanced Options:", open=False):
- with gr.Row():
- with gr.Column(scale=2):
- system = gr.Textbox(
- label="System Prompt",
- value=prompt_template,
- show_label=False,
- container=False,
- # ).style(container=False)
- )
- with gr.Column():
- with gr.Row():
- change = gr.Button("Change System Prompt")
- reset = gr.Button("Reset System Prompt")
-
- with gr.Accordion("Example Inputs", open=True):
- examples = gr.Examples(
- examples=examples_list,
- inputs=[msg],
- examples_per_page=40,
- )
-
- # with gr.Row():
- with gr.Accordion("Disclaimer", open=True):
- _ = Path(model_loc).name
- gr.Markdown(
-            "Disclaimer: I AM NOT RESPONSIBLE FOR ANY PROMPT PROVIDED BY USER AND PROMPT RETURNED FROM THE MODEL. THIS APP SHOULD BE USED FOR EDUCATIONAL PURPOSES "
-            f"WITHOUT ANY OFFENSIVE, AGGRESSIVE INTENTS. {_} can produce factually incorrect output, and should not be relied on to produce "
- f"factually accurate information. {_} was trained on various public datasets; while great efforts "
- "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
- "biased, or otherwise offensive outputs.",
- elem_classes=["disclaimer"],
- )
-
- msg_submit_event = msg.submit(
- # fn=conversation.user_turn,
- fn=user,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- show_progress="full",
- # api_name=None,
- ).then(bot, chatbot, chatbot, queue=True)
- submit_click_event = submit.click(
- # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg
- fn=user1, # clear msg
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- # queue=False,
- show_progress="full",
- # api_name=None,
- ).then(bot, chatbot, chatbot, queue=True)
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[msg_submit_event, submit_click_event],
- queue=False,
- )
- clear.click(lambda: None, None, chatbot, queue=False)
-
- with gr.Accordion("For Chat/Translation API", open=False, visible=False):
- input_text = gr.Text()
- api_btn = gr.Button("Go", variant="primary")
- out_text = gr.Text()
-
- api_btn.click(
- predict_api,
- input_text,
- out_text,
- api_name="api",
- )
-
- # block.load(update_buff, [], buff, every=1)
- # block.load(update_buff, [buff_var], [buff_var, buff], every=1)
-
-# concurrency_count=5, max_size=20
-# max_size=36, concurrency_count=14
-# CPU cpu_count=2 16G, model 7G
-# CPU UPGRADE cpu_count=8 32G, model 7G
-
-# does not work
-_ = """
-# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
-# concurrency_count = max(_, 1)
-if psutil.cpu_count(logical=False) >= 8:
- # concurrency_count = max(int(32 / file_size) - 1, 1)
-else:
- # concurrency_count = max(int(16 / file_size) - 1, 1)
-# """
-
-concurrency_count = 1
-logger.info(f"{concurrency_count=}")
-
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
\ No newline at end of file
diff --git a/spaces/saad-k7/Jewelli-Chatbot/utils.py b/spaces/saad-k7/Jewelli-Chatbot/utils.py
deleted file mode 100644
index fb36e462b95d1a65e2dd4e48e25bde1142e5095d..0000000000000000000000000000000000000000
--- a/spaces/saad-k7/Jewelli-Chatbot/utils.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import openai
-import os
-from dotenv import load_dotenv
-from datetime import datetime
-import logging
-import traceback
-import json
-# from db import get_product_details
-
-with open('keywords.json', 'r') as file:
- keywords = str(json.load(file))
-
-now = datetime.now()
-date_strng = now.strftime("%d-%m-%Y_%H-%M-%S")
-
-if not os.path.exists('logs'):
- os.makedirs('logs')
-
-
-logging.basicConfig(filename='logs/{}.log'.format(date_strng), level=logging.INFO, format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
-logging.info('Starting the application')
-
-load_dotenv()
-
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-context = f"""\
-You are a chatbot designed for "Jewelli", a jewelry shop. Your role is to understand \
-the customer's needs and guide them towards the right category of jewelry. You will \
-ask the customer questions about their preferences, occasion, and who the jewelry is for, \
-using a set of keywords related to our product categories. If the conversation strays \
-from jewelry, tactfully steer it back. Keep your responses succinct and engaging. \
-When you determine the customer's preferences, present them with a list of relevant \
-jewelry categories in the JSON format specified. The categories should correspond \
-with those in the "Product Categories and their Keywords" section. This is crucial \
-because the provided JSON format will be used to look up the corresponding items in our database. \
-
-Product Categories and their keywords:
-{keywords}
-
-JSON format:
-{{
- "productCategory": "product category name",
-}}
-
-"""
-messages = []
-messages.append({"role": "system", "content": context})
-
-def json_format(response):
- dict_start = response.find('{')
- dict_end = response.rfind('}') + 1
- json_string = response[dict_start:dict_end]
- products_dict = json.loads(json_string)
- return products_dict["productCategory"]
-
-def chatbot(user_message, history):
- messages.append({"role": "user", "content": user_message})
- logging.info("Getting response from gpt-3.5-turbo")
- response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo-16k",
- messages=messages,
- temperature=0,
- )
- messages.append({"role": "assistant", "content": response.choices[0].message.content})
-
- try:
- product_category = json_format(response.choices[0].message.content)
- # image_url = get_product_details(product_category)
- # if image_url:
- # return (image_url, product_category)
- except:
- pass
-
- logging.info("Messages list: {}".format(messages))
- return response.choices[0].message.content
\ No newline at end of file
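For context, `chatbot(user_message, history)` already matches the `(message, history)` signature that Gradio's chat components expect. Below is a minimal sketch of mounting it, assuming a recent Gradio release with `gr.ChatInterface` and that the module above is importable as `utils`; the space's own app.py is not shown in this diff.

```python
# Hypothetical wiring only; this is not the space's actual app.py.
import gradio as gr

from utils import chatbot  # assumes utils.py above is on the import path

demo = gr.ChatInterface(fn=chatbot, title="Jewelli Chatbot")
demo.launch()
```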
diff --git a/spaces/samcaicn/bingai/src/components/toaster.tsx b/spaces/samcaicn/bingai/src/components/toaster.tsx
deleted file mode 100644
index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000
--- a/spaces/samcaicn/bingai/src/components/toaster.tsx
+++ /dev/null
@@ -1,3 +0,0 @@
-'use client'
-
-export { Toaster } from 'react-hot-toast'
diff --git a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py b/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py
deleted file mode 100644
index 9e6035f203e286a3c38cfb4f11e653a2d45b112f..0000000000000000000000000000000000000000
--- a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import random
-import time
-
-import numpy as np
-import torch
-from torch import nn
-from sklearn.linear_model import Ridge
-from .utils import get_batch_to_dataloader
-
-def get_batch(batch_size, seq_len, num_features, noisy_std = .1):
- m = torch.normal(0., .1, size=(batch_size,num_features))
- b = 0 # torch.rand(batch_size)
- x = torch.rand(seq_len, batch_size,num_features)
- y_non_noisy = torch.einsum('bf,tbf->tb',m,x)
- y = y_non_noisy + torch.normal(torch.zeros_like(y_non_noisy),noisy_std) # noisy_std is alpha
- return x, y, y_non_noisy
-
-DataLoader = get_batch_to_dataloader(get_batch)
-DataLoader.num_outputs = 1
-
-
-def evaluate(x,y,y_non_noisy, alpha=0.):
- start_time = time.time()
- losses_after_t = [.0]
- for t in range(1,len(x)):
- loss_sum = 0.
- for b_i in range(x.shape[1]):
- clf = Ridge(alpha=alpha)
- clf.fit(x[:t,b_i],y[:t,b_i])
- y_ = clf.predict(x[t,b_i].unsqueeze(0))
- l = nn.MSELoss()(y_non_noisy[t,b_i].unsqueeze(0),torch.tensor(y_))
- loss_sum += l
- losses_after_t.append(loss_sum/x.shape[1])
- return torch.tensor(losses_after_t), time.time()-start_time
-
-if __name__ == '__main__':
- for alpha in [.001,.01,.5,1.]:
-        print(alpha, evaluate(*get_batch(1000, 10, num_features=5, noisy_std=.01), alpha=alpha))  # num_features=5 is an assumed value; get_batch requires this argument
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md b/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md
deleted file mode 100644
index 479c0593e65e27346506a3652fe3af4d26fdf956..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-The installed version is 1.3.5, but there is a newer version (1.4.1) available.
-
-Is it OK to install the newer version to a Lion environment? Or are there any caveats I should be aware of?
-
-A:
-
-Yes, you can install OmniSymphony in a lion environment (macOS 10.7) and it will run just fine.
-
-Q:
-
-In iOS and Laravel, what is the best way to use HTTP methods?
-
-I'm new to iOS and I've just recently started with Laravel 5. It's my first time using HTTP methods and I'm getting confused. Can someone please help me with the best way to do something like this?
-
-Say I want to make an API call from my iOS app: the app makes a GET request that returns a list of items, each of which in turn returns a list of other items, and so on.
-
-My options are:
-
-Method 1:
-
-Make a GET to the API
-
-Handle each item and make a GET to the API for each item
-
-Return a JSONArray or something else
-
-Method 2:
-
-Make a GET for the API (which returns a list of items)
-
-As each item is returned, make a POST to the API for each item
-
-Method 3:
-
-Handle each item and make a GET for each item
-
-Method 4:
-
-Make a GET to the API (which returns a list of items)
-
-Method 5:
-
-What is the best way to do this and why?
-
-I'd do it like this:
-
-POST to /items
-
-In the response, include an item ID (ex: 19cad34) and a
-
-completed flag (ex: 1)
-
-In the next request, POST to /items/$item_id/complete
-
-In the response, include a request id (ex: 43ae3f7) and a status
-
-code (ex: 200)
-
-In the next 4fefd39f24
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md b/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md
deleted file mode 100644
index 15669f124cf29fb49c2ba61b7ce4b474860d71fc..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md
+++ /dev/null
@@ -1,18 +0,0 @@
-