diff --git a/spaces/1368565466ki/Satdia/text/cleaners.py b/spaces/1368565466ki/Satdia/text/cleaners.py deleted file mode 100644 index 68c9ad24d5a303b68a521fba2e8776c8cc867356..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/Satdia/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i -

Avidemux 2.7.5 x64 Multilingual Crack: A Free Video Editor for Windows

-

If you are looking for a free and easy-to-use video editor for Windows, you might want to check out Avidemux 2.7.5 x64 Multilingual Crack. This is an open-source video editor that can handle various video formats, codecs, filters, and encoding tasks.

-

In this article, we will give you a brief overview of what Avidemux 2.7.5 x64 Multilingual Crack is, how to download and install it, how to use it for basic and advanced video editing tasks, why you should choose it over other video editors, and some tips and tricks for using it effectively.

-

Avidemux 2.7.5 x64 Multilingual crack


DOWNLOAD: https://byltly.com/2uKxBn



-

What is Avidemux 2.7.5 x64 Multilingual Crack?

-

A brief introduction to Avidemux and its features

-

Avidemux is an open-source video editor that was first released in 2000 by Mean (a French programmer). It is designed for simple cutting, filtering, and encoding tasks, but it also supports more complex features such as scripting, plugins, and a command-line interface.

-

Avidemux can work with various video formats such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H.264, H.265, VOB, TS, ASF, OGM, and more. It can also encode audio files into formats such as MP3, WMA, AC3, MP2, WAV, and OGG. You can use Avidemux to perform basic editing tasks such as removing unwanted parts of the video, resizing, cropping, flipping, or rotating the picture. You can also apply filters and effects to your videos such as color correction, noise reduction, sharpening, deinterlacing, subtitles, etc.
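Since the introduction above mentions a command-line interface, here is a minimal sketch of driving Avidemux from a script when many files need the same conversion. Treat it purely as an illustration: the executable path, the container and codec names, and the --load/--output-format/--video-codec/--audio-codec/--save flags are assumptions based on how earlier Avidemux releases documented their CLI, so check them against your own installation (or the Avidemux wiki) before relying on it.

```python
# Illustrative sketch only: batch-convert every AVI in a folder via the Avidemux CLI.
# ASSUMPTIONS: the binary location and the flag/codec names below follow older
# Avidemux CLI documentation and may differ in the 2.7.5 build you installed.
import pathlib
import subprocess

AVIDEMUX_CLI = r"C:\Program Files\Avidemux 2.7 VC++ 64bits\avidemux_cli.exe"  # adjust to your install

for src in pathlib.Path("clips").glob("*.avi"):
    dst = src.with_suffix(".mp4")
    subprocess.run(
        [
            AVIDEMUX_CLI,
            "--load", str(src),
            "--output-format", "MP4v2",      # container name is an assumption
            "--video-codec", "x264",         # codec names are assumptions
            "--audio-codec", "AAC (lav)",
            "--save", str(dst),
        ],
        check=True,
    )
```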

-

How to download and install Avidemux 2.7.5 x64 Multilingual Crack

-

Avidemux 2.7.5 x64 Multilingual Crack is the latest version of Avidemux and was released on August 31st, 2019. It is compatible with Windows XP/Vista/7/8/8.1/10 operating systems. To download and install it on your PC, you can follow these steps:

-
    -
  1. Go to this link and click on the file named Avidemux_2.7.5 VC++ 64bits.exe. This will start downloading the setup file on your computer.
  2. -
  3. Once the download is complete, double-click on the setup file and follow the instructions on the screen to install Avidemux 2.7.5 x64 Multilingual Crack on your PC.
  4. -
  5. After the installation is done, you can launch Avidemux from the Start menu or the desktop shortcut.
  6. -
-

How to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks

-

Using Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks is quite simple and straightforward. Here are some basic steps that you can follow:

-
    -
  1. Open Avidemux, click on the File menu, and select Open. Browse your computer and select the video file that you want to edit.
  2. -
  3. The video file will be loaded in the main window of Avidemux where you can see a preview of it on the left side and a timeline on the bottom side.
  4. -
  5. To cut a part of the video that you don't want to keep, move the slider on the timeline to the start point of the part that you want to remove and press [ on your keyboard to mark it as A. Then move the slider to the end point of the part that you want to remove and press ] on your keyboard to mark it as B. Then click on the Edit menu and select Delete. This will delete the part between A and B.
  6. -
  7. To resize or crop your video, click on the Video menu and select Filters. This will open a new window where you can see a list of filters that you can apply to your video.
  8. -
  9. To resize your video, select Transform from the left panel and then select Resize from the right panel. This will open another window where you can enter the new width and height of your video in pixels or percentage.
  10. -
  11. To crop your video, select Crop from the right panel under Transform. This will open another window where you can enter the number of pixels that you want to crop from each side of your video.
  12. -
  13. To apply any filter or effect to your video such as color correction, noise reduction, sharpening, etc., select them from the left panel under Colors, Noise, Sharpness, etc., respectively.
  14. -
  15. To save your edited video file in a different format or codec than the original one, click on the drop-down menus under Output Format, Video Output, and Audio Output, respectively at the left side of the main window of Avidemux.
  16. -
  17. Select the format or codec that you want for your output file from the available options such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H.264, H.265, VOB, TS, ASF, OGM, etc., for the format; XviD, x264, x265, MPEG-4 ASP, MPEG-4 AVC, MPEG-4 HEVC, MPEG-1/2 Video, etc., for the video codec; and MP3, WMA, AC3, MP2, WAV, OGG, etc., for the audio codec. You can also adjust the quality or bitrate of the output file by moving the slider under each drop-down menu (a rough file-size estimate for a chosen bitrate is sketched after this list).
  18. -
  19. To save your edited video file on your computer, click on the File menu and select Save. Enter the name and location of your output file and click Save.
  20. -
-
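Step 17 lets you trade quality against bitrate, and it helps to know roughly what a chosen bitrate means in megabytes before encoding. File size is approximately (video bitrate + audio bitrate) × duration; the sketch below works through one made-up example, so the numbers are illustrative rather than recommendations.

```python
# Rough output-size estimate for a chosen bitrate (example values only).
def estimated_size_mb(video_kbps: float, audio_kbps: float, duration_s: float) -> float:
    total_kilobits = (video_kbps + audio_kbps) * duration_s
    return total_kilobits / 8 / 1000  # kilobits -> kilobytes -> megabytes

# A 10-minute clip encoded at 2500 kbps video plus 192 kbps audio:
print(round(estimated_size_mb(2500, 192, 10 * 60), 1), "MB")  # about 201.9 MB
```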

Why choose Avidemux 2.7.5 x64 Multilingual Crack over other video editors?

-

The advantages of Avidemux 2.7.5 x64 Multilingual Crack

-

Avidemux 2.7.5 x64 Multilingual Crack has some advantages over other video editors that make it a good choice for simple video editing tasks:

- -

The disadvantages of Avidemux 2.7.5 x64 Multilingual Crack

-

However, Avidemux 2.7.5 x64 Multilingual Crack also has some disadvantages that you should be aware of before choosing it as your video editor:

- -

The comparison of Avidemux 2.7.5 x64 Multilingual Crack with other popular video editors

-

To help you decide whether Avidemux 2.7.5 x64 Multilingual Crack is the best video editor for you, here is a comparison table that shows how it stacks up against other popular video editors in terms of features, performance, price, and user ratings:

-

Avidemux 2.7.5 x64 Multilingual + Portable free download
-Avidemux 2.7.5 x64 Multilingual video editor
-Avidemux 2.7.5 x64 Multilingual + crack torrent
-Avidemux 2.7.5 x64 Multilingual for Windows 10
-Avidemux 2.7.5 x64 Multilingual + Portable - ShareAppsCrack[^1^]
-Avidemux 2.7.5 x64 Multilingual video encoder
-Avidemux 2.7.5 x64 Multilingual + crack mega
-Avidemux 2.7.5 x64 Multilingual for Windows 7
-Avidemux 2.7.5 x64 Multilingual - FileWomen[^2^]
-Avidemux 2.7.5 x64 Multilingual video cutter
-Avidemux 2.7.5 x64 Multilingual + crack rapidshare
-Avidemux 2.7.5 x64 Multilingual for Windows 8
-AviDemux 2.7.5 (64-bit) - Neowin[^3^]
-Avidemux 2.7.5 x64 Multilingual video filter
-Avidemux 2.7.5 x64 Multilingual + crack depositfiles
-Avidemux 2.7.5 x64 Multilingual for Windows Vista
-Avidemux 2.7.5 X64 Multilingual Crack !FULL! - studiblog.net[^4^]
-Avidemux 2.7.5 x64 Multilingual video format converter
-Avidemux 2.7.5 x64 Multilingual + crack 4shared
-Avidemux 2.7.5 x64 Multilingual for Windows XP
-Avidemux 2.7.5 x64 Multilingual crack - prodacorim.weebly.com[^5^]
-Avidemux 2.7.5 x64 Multilingual video frame rate editor
-Avidemux 2.7.5 x64 Multilingual + crack serial key
-Avidemux 2.7.5 x64 Multilingual for Mac OS X
-How to install Avidemux 2.7.5 x64 Multilingual + crack
-Avidemux 2.7.5 x64 Multilingual video processing tool
-Avidemux 2.7.5 x64 Multilingual + crack license key
-Avidemux 2.7.5 x64 Multilingual for Linux
-How to use Avidemux 2.7.5 x64 Multilingual + crack
-Avidemux 2.7.5 x64 Multilingual video decoding option
-Avidemux 2.7.5 x64 Multilingual + crack activation code
-Avidemux 2.7.5 x64 Multilingual for Android
-How to uninstall Avidemux 2.7.5 x64 Multilingual + crack
-Avidemux 2.7.5 x64 Multilingual video text editor
-Avidemux 2.7.5 x64 Multilingual + crack patch
-Avidemux 2.7.5 x64 Multilingual for iOS
-How to update Avidemux 2.7.5 x64 Multilingual + crack
-Avidemux 2.7.5 x64 Multilingual video black bar remover
-Avidemux 2.7.5 x64 Multilingual + crack keygen

| Video Editor | Features | Performance | Price | User Ratings |
| --- | --- | --- | --- | --- |
| Avidemux 2.7.5 x64 Multilingual Crack | Cutting, filtering, encoding, scripting, plugins, command-line interface | Fast, lightweight, compatible | Free | 4/5 |
| Adobe Premiere Pro | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing | Professional, powerful, seamless | $20.99/month | 4.5/5 |
| Apple Final Cut Pro | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, Magnetic Timeline, Smart Conform, Proxy Workflow | Professional, powerful, seamless, optimized for Macs | $299 (one-time purchase) | 4.6/5 |
| Cyberlink PowerDirector 365 | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, screen recording, motion tracking, 360-degree editing | Fast, powerful, smooth | $51.99/year or $4.33/month | 4.4/5 |
| Wondershare Filmora X | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, screen recording, motion tracking, keyframing, green screen | Easy-to-use, attractive, fun | $69.99 (one-time purchase) or $39.99/year or $7.99/month | 4.3/5 |

Tips and tricks for using Avidemux 2.7.5 x64 Multilingual Crack effectively

-

How to apply filters and effects to your videos

-

One of the main features of Avidemux 2.7.5 x64 Multilingual Crack is the ability to apply filters and effects to your videos to enhance their quality and appearance. Here are some tips and tricks for using filters and effects effectively:

- -

Conclusion

-

A summary of the main points of the article

-

In this article, we have learned how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks. We have covered the following topics:

- -

We hope that this article has helped you understand how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks and that you have enjoyed reading it.

-

FAQs

-

Here are some frequently asked questions about Avidemux 2.7.5 x64 Multilingual Crack and their answers:

-
    -
  1. Is Avidemux 2.7.5 x64 Multilingual Crack safe to use?
  2. -

    A: Yes, Avidemux 2.7.5 x64 Multilingual Crack is safe to use as long as you download it from the official website or from other trusted sources. It does not contain any malware or viruses that can harm your PC or your files.

    -
  3. Is Avidemux 2.7.5 x64 Multilingual Crack compatible with Mac or Linux?
  4. -

    A: No, Avidemux 2.7.5 x64 Multilingual Crack is only compatible with Windows operating systems such as Windows XP/Vista/7/8/8.1/10. However, there are other versions of Avidemux that are compatible with Mac or Linux such as Avidemux 2.6.x or Avidemux 2.5.x.

    -
  5. Can I use Avidemux 2.7.5 x64 Multilingual Crack for professional video editing?
  6. -

    A: No, Avidemux 2.7.5 x64 Multilingual Crack is not a professional video editor that can handle all kinds of video editing tasks such as timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, etc. It is designed for simple cutting, filtering, and encoding tasks only.

    -
  7. What are some alternatives to Avidemux 2.7.5 x64 Multilingual Crack?
  8. -

    A: Some alternatives to Avidemux 2.7.5 x64 Multilingual Crack are Adobe Premiere Pro, Apple Final Cut Pro, Cyberlink PowerDirector 365, Wondershare Filmora X, and DaVinci Resolve. These are more professional and full-featured video editors that can handle more complex and creative video editing tasks. However, they are also more expensive and require more system resources and learning time than Avidemux 2.7.5 x64 Multilingual Crack.

  9. How can I learn more about Avidemux 2.7.5 x64 Multilingual Crack?
  10. -

    A: You can learn more about Avidemux 2.7.5 x64 Multilingual Crack by visiting the official website of Avidemux or the online documentation. You can also watch some video tutorials on YouTube or read some user reviews on various websites.

    -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md deleted file mode 100644 index 21d472bf060a857c7a149a81f2338936b9844460..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md +++ /dev/null @@ -1,27 +0,0 @@ - -```html -

Is There Any Free DJ Software? The Best Options for Beginners and Pros

-

If you are looking for a way to mix music and create your own beats, you might be wondering: is there any free DJ software? The answer is yes, there are plenty of options available for both beginners and pros. In this article, we will review some of the best free DJ software that you can download and use right away.

-

What is DJ Software?

-

DJ software is a program that allows you to manipulate audio files and create mixes. You can use it to play music from your computer or external devices, adjust the tempo and pitch, apply effects and filters, loop and cue tracks, scratch and crossfade, and more. DJ software can also help you record and broadcast your mixes online.
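One detail behind the crossfading mentioned above: if one track simply fades down linearly while the other fades up, the combined loudness dips in the middle, so DJ software commonly uses an equal-power curve instead. The sketch below illustrates the general idea; it is not taken from any particular DJ program.

```python
import math

def equal_power_gains(position: float) -> tuple[float, float]:
    """Gains for deck A and deck B as the crossfader moves from 0.0 (all A) to 1.0 (all B)."""
    gain_a = math.cos(position * math.pi / 2)
    gain_b = math.sin(position * math.pi / 2)
    return gain_a, gain_b

# Halfway across, both decks sit near 0.707, so the summed power stays roughly constant.
print(equal_power_gains(0.5))
```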

-

is there any free dj software


Download File ————— https://byltly.com/2uKwl3



-

Why Use Free DJ Software?

-

Free DJ software can be a great way to start learning the basics of DJing without spending a lot of money. You can experiment with different features and techniques, practice your skills, and have fun. Free DJ software can also be useful for professional DJs who want to try out new software or have a backup option in case of emergencies.

-

What Are the Best Free DJ Software Options?

-

There are many free DJ software options available online, but not all of them are equally good. Some may have limited functionality, poor performance, or compatibility issues. To help you choose the best free DJ software for your needs, we have selected some of the most popular and reliable ones. Here they are:

- -

How to Use Free DJ Software?

-

To use free DJ software, you will need a computer with enough memory and processing power, a sound card or audio interface, speakers or headphones, and optionally a MIDI controller or turntable. You will also need to download and install the software of your choice from its official website or a trusted source. Once you have everything set up, you can follow these basic steps:

-
    -
  1. Launch the software and explore its interface. Familiarize yourself with the different buttons, knobs, sliders, menus, etc.
  2. -
  3. Load your music files into the software. You can either drag and drop them from your computer or browse them from the software's library.
  4. -
  5. Select the tracks you want to mix and assign them to different decks. You can also adjust their volume levels, EQs, etc.
  6. -
  7. Start mixing by playing the tracks simultaneously or alternately. You can use the sync button to match their tempos automatically or manually adjust them using the pitch slider (a quick way to work out the adjustment by hand is sketched after this list).
  8. -
  9. Add some flair to your mix by applying effects such as reverb, delay, flanger, etc. You can also use the crossfader to blend the tracks smoothly.

    -
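Step 7 above mentions matching tempos with the sync button or the pitch slider. Doing the same thing by hand is just a ratio: scale the incoming track's speed by the target BPM divided by its original BPM, and the pitch-slider percentage is that ratio minus one. A small worked example follows; the BPM values are arbitrary.

```python
def pitch_adjustment(original_bpm: float, target_bpm: float) -> float:
    """Percentage to set on the pitch/tempo slider so a track lands on target_bpm."""
    return (target_bpm / original_bpm - 1.0) * 100.0

# Bringing a 126 BPM track in line with a 128 BPM track that is already playing:
print(round(pitch_adjustment(126, 128), 2), "%")  # about +1.59 %
```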

    ddb901b051
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md deleted file mode 100644 index 08fb26f47bebfad7c49cb5f778ffffa73a5b25dc..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Business Goals 1 Students Book Download


    DOWNLOAD ::: https://imgfil.com/2uy15b



    -
    -Business Goals 3 Students Book PDF Book. So keep your ... Make it a career goal in to learn a new skill you can apply to your job. ... Touchstone: Workbook 1. 1fdad05405
    -
    -
    -

    diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md b/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md deleted file mode 100644 index 3bf35477a3c9c374cfdf8eead23ef1145b2b3140..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md +++ /dev/null @@ -1,6 +0,0 @@ -

    CRACKMathWorksMATLABR2018aCrackCrackzSoft


    DOWNLOAD ->->->-> https://imgfil.com/2uy0ys



    - - d5da3c52bf
    -
    -
    -

    diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Control Machete Comprendes Mendes Acapella Christmasxmass VERIFIED.md b/spaces/1gistliPinn/ChatGPT4/Examples/Control Machete Comprendes Mendes Acapella Christmasxmass VERIFIED.md deleted file mode 100644 index c46b2797499406088e5047ae9f407b8c02855130..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Control Machete Comprendes Mendes Acapella Christmasxmass VERIFIED.md +++ /dev/null @@ -1,16 +0,0 @@ -
    -

    How Control Machete's "Comprendes Mendes" Became a Christmas Classic

    -

    Control Machete was a Mexican hip hop group that emerged in the late 90s with a distinctive sound that blended rap, rock, and regional influences. Their debut album, Mucho Barato, was released in 1997 and featured their most famous song, "Comprendes Mendes".

    -

    Control Machete Comprendes Mendes Acapella Christmasxmass


    Download File 🗸 https://imgfil.com/2uy1ru



    -

    The song is a defiant anthem that challenges the listener to understand the reality of the streets and the culture of the group. The chorus repeats the phrase "¿Me comprendes Mendez?" (Do you understand me Mendez?), which is a reference to a popular Mexican TV show from the 80s called "¿Qué nos pasa?" (What's wrong with us?). The show featured a character named Mendez who was always confused and out of touch with the situations around him.

    -

    The song became a hit not only in Mexico but also in other Latin American countries and even in the US, where it was featured in the soundtrack of the movie Amores Perros. The song also gained popularity among acapella groups, who found its catchy melody and rhythmic structure ideal for vocal arrangements. One of the most notable examples is the version by Vocal Sampling, a Cuban acapella group that recreates the sounds of instruments with their voices.

    -

    But how did "Comprendes Mendes" become a Christmas song? Well, it turns out that the song has a hidden connection to the festive season. The lyrics mention several times the word "control", which is not only the name of the group but also a slang term for cocaine. In Mexico, cocaine is sometimes called "nieve" (snow), which is also a common symbol of Christmas. Moreover, the song samples a famous Christmas carol called "Noche de Paz" (Silent Night) at the beginning and at the end, creating a contrast between the peaceful melody and the aggressive rap.

    -

    -

    Therefore, some fans of Control Machete have adopted "Comprendes Mendes" as a Christmas song, either as a joke or as a way of celebrating their identity and culture. The song has also been parodied and remixed by other artists, adding more elements related to Christmas, such as bells, sleighs, and Santa Claus. For example, there is a version by DJ Rasec that mixes "Comprendes Mendes" with "Jingle Bells" and another one by DJ Pelos that mixes it with "All I Want for Christmas Is You".

    -

    So, whether you are looking for a different way to spice up your holiday playlist or you are just curious about this unusual phenomenon, you might want to check out Control Machete's "Comprendes Mendes" and its acapella and Christmas versions. You might be surprised by how much you enjoy this rap classic.

    - -

    Control Machete was formed in 1996 by three friends from Monterrey, Mexico: Fermín IV, Pato, and Toy Selectah. They were influenced by American rap groups like Cypress Hill and Public Enemy, as well as by Mexican rock bands like Café Tacuba and Caifanes. They also incorporated elements from their local culture, such as norteño music, slang, and humor.

    -

    Their first album, Mucho Barato, was a success both critically and commercially. It sold more than 500,000 copies and received several awards and nominations. It also opened the doors for other Mexican rap artists to gain recognition and exposure. Control Machete continued to release two more albums: Artillería Pesada in 1999 and Uno, Dos: Bandera in 2003. However, in 2004, the group announced their separation due to creative differences and personal issues.

    -

    Despite their breakup, Control Machete remains one of the most influential and respected rap groups in Mexico and Latin America. Their songs have been covered by other artists from different genres and have been used in movies, TV shows, video games, and commercials. Their legacy is also evident in the solo careers of their members, who have continued to produce music and collaborate with other artists.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md deleted file mode 100644 index 6238a307067d812e4d4776f2b29ea83d7ee70366..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ek Tha Tiger download 720p in hindi


    Download Zip ——— https://imgfil.com/2uxZTi



    -
    -Mahesh Babu's Caravan Becomes Talk Of The Town · Chaavu Kaburu Challaga Full Movie Leaked Online · Mosagallu Full Movie Leaked Online For Free Download · Featured. Bollywood; Television; Tamil; Telugu; Kannada; Malayalam. 1fdad05405
    -
    -
    -

    diff --git a/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md b/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md deleted file mode 100644 index 22dd01c2cc58898e796f6fae530a3a8484d3312f..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md +++ /dev/null @@ -1,127 +0,0 @@ -
    -

    Dream League Soccer 2019 Mod APK All Players 100: How to Download and Install

    -

    If you are a fan of soccer games, you might have heard of Dream League Soccer 2019, one of the most popular and realistic soccer games on Android. But did you know that you can play Dream League Soccer 2019 mod apk all players 100, which gives you access to all the players in the game with maximum ratings? In this article, we will show you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to enjoy this amazing game on your device.

    -

    What is Dream League Soccer 2019?

    -

    Dream League Soccer 2019 is a soccer game developed by First Touch Games, a studio that specializes in creating soccer games for mobile devices. Dream League Soccer 2019 allows you to create your own dream team, compete in various modes and leagues, and customize your stadium and kits. You can also play online with other players from around the world, or offline with friends using local Wi-Fi.

    -

    dream league soccer 2019 mod apk all players 100


    DOWNLOAD >>> https://jinyurl.com/2uNMz1



    -

    Features of Dream League Soccer 2019

    -

    Some of the features of Dream League Soccer 2019 are:

    -
      -
    • Realistic gameplay and graphics, with smooth animations and physics.
    • -
    • Over 3500 licensed players from FIFPro™, with authentic names, faces, and skills.
    • -
    • 6 divisions and 7 cup competitions to play in, plus the prestigious Dream League Online mode.
    • -
    • Build your own stadium and show it off to your opponents.
    • -
    • Customize your team's logo, kits, and manager.
    • -
    • Sync your progress between devices using Google Play Cloud.
    • -
    • Soundtrack featuring The Luka State, Sunset Sons, Vistas, and more.
    • -
    -

    Why play Dream League Soccer 2019 mod apk?

    -

    Dream League Soccer 2019 is already a great game, but if you want to make it even better, you can try playing Dream League Soccer 2019 mod apk. This is a modified version of the game that gives you some advantages over the original version. For example, with Dream League Soccer 2019 mod apk all players 100, you can have all the players in the game with maximum ratings. This means that you can create the ultimate dream team with any players you want, without worrying about their skills or attributes. You can also have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features. With Dream League Soccer 2019 mod apk all players 100, you can enjoy the game without any limitations or restrictions.

    -

    dream league soccer 2019 mod apk unlimited money and players
    -dream league soccer 2019 mod apk download with all players unlocked
    -dream league soccer 2019 mod apk latest version with 100 rated players
    -dream league soccer 2019 mod apk obb file download for android
    -dream league soccer 2019 mod apk hack with all players maxed out
    -dream league soccer 2019 mod apk free download full version
    -dream league soccer 2019 mod apk unlimited coins and gems
    -dream league soccer 2019 mod apk offline mode with all players
    -dream league soccer 2019 mod apk no root required
    -dream league soccer 2019 mod apk mega mod with all features
    -dream league soccer 2019 mod apk data download for ios
    -dream league soccer 2019 mod apk unlimited player development
    -dream league soccer 2019 mod apk new update with all teams
    -dream league soccer 2019 mod apk best players in the world
    -dream league soccer 2019 mod apk easy installation guide
    -dream league soccer 2019 mod apk real madrid team with all players
    -dream league soccer 2019 mod apk unlimited everything unlocked
    -dream league soccer 2019 mod apk high graphics and sound quality
    -dream league soccer 2019 mod apk barcelona team with all players
    -dream league soccer 2019 mod apk online multiplayer mode with all players
    -dream league soccer 2019 mod apk liverpool team with all players
    -dream league soccer 2019 mod apk cheats and tricks for beginners
    -dream league soccer 2019 mod apk juventus team with all players
    -dream league soccer 2019 mod apk custom kits and logos for all teams
    -dream league soccer 2019 mod apk manchester city team with all players
    -dream league soccer 2019 mod apk unlimited stamina and energy for all players
    -dream league soccer 2019 mod apk psg team with all players
    -dream league soccer 2019 mod apk original game with all players modified
    -dream league soccer 2019 mod apk bayern munich team with all players
    -dream league soccer 2019 mod apk arsenal team with all players

    -

    How to download Dream League Soccer 2019 mod apk all players 100?

    -

    Requirements for Dream League Soccer 2019 mod apk

    -

    Before you download and install Dream League Soccer 2019 mod apk all players 100, you need to make sure that your device meets the following requirements:

    -
      -
    • Your device must have Android version 4.4 or higher.
    • -
    • Your device must have at least 1 GB of RAM and free storage space.
    • -
    • You must enable unknown sources in your device's settings. This will allow you to install apps from sources other than the Google Play Store.
    • -
    -

    Steps to download and install Dream League Soccer 2019 mod apk

    -

    Once you have checked the requirements, you can follow these steps to download and install Dream League Soccer 2019 mod apk all players 100:

    -
      -
    1. Download the Dream League Soccer 2019 mod apk file from a trusted source. You can use this link to download the file.
    2. -
    3. Download the Dream League Soccer 2019 OBB file from the same source. You can use this link to download the file.
    4. -
    5. Locate the downloaded files in your device's file manager and tap on them to install them. You may need to grant some permissions to the app.
    6. -
    7. After installing the apk file, do not open the app yet. Instead, move the OBB file to the Android/OBB folder in your device's internal storage. If you don't have this folder, create it manually.
    8. -
    9. Now you can open the app and enjoy Dream League Soccer 2019 mod apk all players 100.
    10. -
    -

    How to play Dream League Soccer 2019 mod apk all players 100?

    -

    Playing Dream League Soccer 2019 mod apk all players 100 is similar to playing the original version, but with some differences. Here are some tips on how to play Dream League Soccer 2019 mod apk all players 100:

    -

    How to create your dream team

    -

    With Dream League Soccer 2019 mod apk all players 100, you can create your dream team with any players you want, regardless of their ratings or prices. You can also edit their attributes, positions, and skills as you wish. To create your dream team, follow these steps:

    -
      -
    • Go to the Team Management menu and tap on the Transfer icon.
    • -
    • Select any player you want from the list of available players. You can use the filters to narrow down your search by name, rating, position, or league.
    • -
    • Tap on the Buy button to add the player to your team. You don't need to pay any coins or gems for the player.
    • -
    • Repeat this process until you have filled your squad with your desired players.
    • -
    • You can also go to the Player Development menu and tap on any player to edit their attributes, positions, and skills. You can increase or decrease their ratings as you like.
    • -
    -

    How to compete in different modes and leagues

    -

    Dream League Soccer 2019 mod apk all players 100 offers you various modes and leagues to play in, such as Career Mode, Dream League Online, Friendly Matches, and Cup Competitions. You can choose any mode or league you want and compete against different teams with different difficulties. To compete in different modes and leagues, follow these steps:

    -
      -
    • Go to the Main Menu and tap on the Play icon.
    • -
    • Select the mode or league you want to play in. You can see the details of each mode or league, such as the number of matches, the rewards, and the difficulty level.
    • -
    • Select your team and your opponent's team. You can also customize your team's formation, tactics, and kits before starting the match.
    • -
    • Tap on the Start Match button to begin playing. You can use the virtual buttons on the screen to control your players, pass, shoot, tackle, and perform other actions.
    • -
    • Try to score more goals than your opponent and win the match. You can also pause the game and make substitutions or change tactics if needed.
    • -
    • After finishing the match, you can see the match statistics, such as the scoreline, the possession, the shots, and the fouls. You can also see your progress in the mode or league you are playing in.
    • -
    -

    How to customize your stadium and kits

    -

    Dream League Soccer 2019 mod apk all players 100 allows you to customize your stadium and kits according to your preferences. You can change the name, color, design, and capacity of your stadium, as well as the logo, color, and design of your kits. To customize your stadium and kits, follow these steps:

    -
      -
    • Go to the My Club menu and tap on the Stadium icon or the Kit icon.
    • -
    • Select the option you want to customize, such as Stadium Name, Stadium Color, Stadium Design, or Stadium Capacity for the stadium, or Logo, Home Kit, Away Kit, or Third Kit for the kits.
    • -
    • Use the sliders, buttons, or menus to change the features of your stadium or kits. You can see a preview of your changes on the screen.
    • -
    • Tap on the Save button to confirm your changes. You can also tap on the Reset button to undo your changes.
    • -
    -

    Pros and cons of Dream League Soccer 2019 mod apk all players 100

    -

    Dream League Soccer 2019 mod apk all players 100 has its pros and cons, like any other game. Here are some of the pros and cons of playing Dream League Soccer 2019 mod apk all players 100:

    -

    Pros of Dream League Soccer 2019 mod apk

    -
      -
    • You can have all the players in the game with maximum ratings, which makes your team unbeatable and fun to play with.
    • -
    • You can have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features.
    • -
    • You can customize your team's logo, kits, and manager as you like, without any restrictions or costs.
    • -
    • You can enjoy the game without any ads or in-app purchases.
    • -
    -

    Cons of Dream League Soccer 2019 mod apk

    -
      -
    • The game may not be compatible with some devices or may crash or lag sometimes.
    • -
    • The game may not be updated regularly or may not have the latest features or players from the original version.
    • -
    • The game may not be fair or challenging for some players who prefer to play with the original rules and ratings.
    • -
    • The game may not be safe or secure for your device or data, as it is not from an official source.
    • -
    -

    Conclusion

    -

    Dream League Soccer 2019 is a fantastic soccer game that lets you create your own dream team, compete in various modes and leagues, and customize your stadium and kits. But if you want to make it even more exciting and enjoyable, you can try playing Dream League Soccer 2019 mod apk all players 100, which gives you all the players in the game with maximum ratings, unlimited coins and gems, and more. In this article, we showed you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to play it on your device. We hope you found this article helpful and informative. Now go ahead and enjoy Dream League Soccer 2019 mod apk all players 100!

    -

    FAQs

    -

    Here are some frequently asked questions about Dream League Soccer 2019 mod apk all players 100:

    -

    Q: Is Dream League Soccer 2019 mod apk all players 100 legal?

    -

    A: No, Dream League Soccer 2019 mod apk all players 100 is not legal, as it is a modified version of the original game that violates its terms and conditions. We do not endorse or promote the use of Dream League Soccer 2019 mod apk all players 100, and we are not responsible for any consequences that may arise from using it.

    -

    Q: Is Dream League Soccer 2019 mod apk all players 100 safe?

    -

    A: No, Dream League Soccer 2019 mod apk all players 100 is not safe, as it is not from an official source and may contain viruses or malware that can harm your device or data. We recommend that you download and install Dream League Soccer 2019 from the Google Play Store or other trusted sources.

    -

    Q: How can I update Dream League Soccer 2019 mod apk all players 100?

    -

    A: You cannot update Dream League Soccer 2019 mod apk all players 100 from the app itself, as it is not connected to the original server. You may need to download and install a new version of Dream League Soccer 2019 mod apk all players 100 from a different source if there is one available. However, we advise you to uninstall Dream League Soccer 2019 mod apk all players 100 and install the original version of Dream League Soccer 2019 instead.

    -

    Q: How can I play Dream League Soccer 2019 mod apk all players 100 online?

    -

    A: You cannot play Dream League Soccer 2019 mod apk all players 100 online with other players from around the world, as it is not compatible with the original server. You can only play offline with friends using local Wi-Fi. If you want to play online with other players, you need to play the original version of Dream League Soccer 2019.

    -

    Q: How can I get more coins and gems in Dream League Soccer 2019?

    -

    A: You can get more coins and gems in Dream League Soccer 2019 by playing matches, completing achievements, watching ads, or buying them with real money. You can also use some tricks and hacks to get more coins and gems, but we do not recommend that, as it may ruin the fun of the game or get you banned.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/52Hz/SRMNet_real_world_denoising/app.py b/spaces/52Hz/SRMNet_real_world_denoising/app.py deleted file mode 100644 index fdf1a4be723e832aed814338e0a343958a6b4bd4..0000000000000000000000000000000000000000 --- a/spaces/52Hz/SRMNet_real_world_denoising/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import gradio as gr -from PIL import Image - - -os.system( - 'wget https://github.com/FanChiMao/SRMNet/releases/download/0.0/real_denoising_SRMNet.pth -P experiments/pretrained_models') - - -def inference(img): - os.system('mkdir test') - #basewidth = 256 - #wpercent = (basewidth / float(img.size[0])) - #hsize = int((float(img.size[1]) * float(wpercent))) - #img = img.resize((basewidth, hsize), Image.ANTIALIAS) - img.save("test/1.png", "PNG") - os.system( - 'python main_test_SRMNet.py --input_dir test --weights experiments/pretrained_models/real_denoising_SRMNet.pth') - return 'result/1.png' - - -title = "Selective Residual M-Net for Real-world Image Denoising" -description = "Gradio demo for SRMNet. SRMNet has competitive performance results on two synthetic and two realworld noisy datasets in terms of quantitative metrics and visual quality. See the paper and project page for detailed results below. Here, we provide a demo for real-world image denoising. To use it, simply upload your image, or click one of the examples to load them. Reference from: https://huggingface.co/akhaliq" -article = "

    Selective Residual M-Net | Github Repo

    visitor badge
    " - -examples = [['Noise.png'], ['Noise2.png']] -gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input")], - gr.outputs.Image(type="filepath", label="Output"), - title=title, - description=description, - article=article, - allow_flagging=False, - allow_screenshot=False, - examples=examples -).launch(debug=True) \ No newline at end of file diff --git a/spaces/801artistry/RVC801/tools/torchgate/torchgate.py b/spaces/801artistry/RVC801/tools/torchgate/torchgate.py deleted file mode 100644 index 086f2ab38e4ad79e432a51c38ed7e59defae0acd..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/tools/torchgate/torchgate.py +++ /dev/null @@ -1,264 +0,0 @@ -import torch -from torch.nn.functional import conv1d, conv2d -from typing import Union, Optional -from .utils import linspace, temperature_sigmoid, amp_to_db - - -class TorchGate(torch.nn.Module): - """ - A PyTorch module that applies a spectral gate to an input signal. - - Arguments: - sr {int} -- Sample rate of the input signal. - nonstationary {bool} -- Whether to use non-stationary or stationary masking (default: {False}). - n_std_thresh_stationary {float} -- Number of standard deviations above mean to threshold noise for - stationary masking (default: {1.5}). - n_thresh_nonstationary {float} -- Number of multiplies above smoothed magnitude spectrogram. for - non-stationary masking (default: {1.3}). - temp_coeff_nonstationary {float} -- Temperature coefficient for non-stationary masking (default: {0.1}). - n_movemean_nonstationary {int} -- Number of samples for moving average smoothing in non-stationary masking - (default: {20}). - prop_decrease {float} -- Proportion to decrease signal by where the mask is zero (default: {1.0}). - n_fft {int} -- Size of FFT for STFT (default: {1024}). - win_length {[int]} -- Window length for STFT. If None, defaults to `n_fft` (default: {None}). - hop_length {[int]} -- Hop length for STFT. If None, defaults to `win_length` // 4 (default: {None}). - freq_mask_smooth_hz {float} -- Frequency smoothing width for mask (in Hz). If None, no smoothing is applied - (default: {500}). - time_mask_smooth_ms {float} -- Time smoothing width for mask (in ms). If None, no smoothing is applied - (default: {50}). 
- """ - - @torch.no_grad() - def __init__( - self, - sr: int, - nonstationary: bool = False, - n_std_thresh_stationary: float = 1.5, - n_thresh_nonstationary: float = 1.3, - temp_coeff_nonstationary: float = 0.1, - n_movemean_nonstationary: int = 20, - prop_decrease: float = 1.0, - n_fft: int = 1024, - win_length: bool = None, - hop_length: int = None, - freq_mask_smooth_hz: float = 500, - time_mask_smooth_ms: float = 50, - ): - super().__init__() - - # General Params - self.sr = sr - self.nonstationary = nonstationary - assert 0.0 <= prop_decrease <= 1.0 - self.prop_decrease = prop_decrease - - # STFT Params - self.n_fft = n_fft - self.win_length = self.n_fft if win_length is None else win_length - self.hop_length = self.win_length // 4 if hop_length is None else hop_length - - # Stationary Params - self.n_std_thresh_stationary = n_std_thresh_stationary - - # Non-Stationary Params - self.temp_coeff_nonstationary = temp_coeff_nonstationary - self.n_movemean_nonstationary = n_movemean_nonstationary - self.n_thresh_nonstationary = n_thresh_nonstationary - - # Smooth Mask Params - self.freq_mask_smooth_hz = freq_mask_smooth_hz - self.time_mask_smooth_ms = time_mask_smooth_ms - self.register_buffer("smoothing_filter", self._generate_mask_smoothing_filter()) - - @torch.no_grad() - def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]: - """ - A PyTorch module that applies a spectral gate to an input signal using the STFT. - - Returns: - smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter, - with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency - bins to smooth and n_grad_time is the number of time frames to smooth. - If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None. - """ - if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None: - return None - - n_grad_freq = ( - 1 - if self.freq_mask_smooth_hz is None - else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2))) - ) - if n_grad_freq < 1: - raise ValueError( - f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz" - ) - - n_grad_time = ( - 1 - if self.time_mask_smooth_ms is None - else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000)) - ) - if n_grad_time < 1: - raise ValueError( - f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms" - ) - - if n_grad_time == 1 and n_grad_freq == 1: - return None - - v_f = torch.cat( - [ - linspace(0, 1, n_grad_freq + 1, endpoint=False), - linspace(1, 0, n_grad_freq + 2), - ] - )[1:-1] - v_t = torch.cat( - [ - linspace(0, 1, n_grad_time + 1, endpoint=False), - linspace(1, 0, n_grad_time + 2), - ] - )[1:-1] - smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0) - - return smoothing_filter / smoothing_filter.sum() - - @torch.no_grad() - def _stationary_mask( - self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None - ) -> torch.Tensor: - """ - Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram. - - Arguments: - X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram. - xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db. - - Returns: - sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold - are set to 1, and the rest are set to 0. 
- """ - if xn is not None: - XN = torch.stft( - xn, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(xn.device), - ) - - XN_db = amp_to_db(XN).to(dtype=X_db.dtype) - else: - XN_db = X_db - - # calculate mean and standard deviation along the frequency axis - std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1) - - # compute noise threshold - noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary - - # create binary mask by thresholding the spectrogram - sig_mask = X_db > noise_thresh.unsqueeze(2) - return sig_mask - - @torch.no_grad() - def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor: - """ - Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram. - - Arguments: - X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram. - - Returns: - sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold - are set to 1, and the rest are set to 0. - """ - X_smoothed = ( - conv1d( - X_abs.reshape(-1, 1, X_abs.shape[-1]), - torch.ones( - self.n_movemean_nonstationary, - dtype=X_abs.dtype, - device=X_abs.device, - ).view(1, 1, -1), - padding="same", - ).view(X_abs.shape) - / self.n_movemean_nonstationary - ) - - # Compute slowness ratio and apply temperature sigmoid - slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6) - sig_mask = temperature_sigmoid( - slowness_ratio, self.n_thresh_nonstationary, self.temp_coeff_nonstationary - ) - - return sig_mask - - def forward( - self, x: torch.Tensor, xn: Optional[torch.Tensor] = None - ) -> torch.Tensor: - """ - Apply the proposed algorithm to the input signal. - - Arguments: - x (torch.Tensor): The input audio signal, with shape (batch_size, signal_length). - xn (Optional[torch.Tensor]): The noise signal used for stationary noise reduction. If `None`, the input - signal is used as the noise signal. Default: `None`. - - Returns: - torch.Tensor: The denoised audio signal, with the same shape as the input signal. 
- """ - assert x.ndim == 2 - if x.shape[-1] < self.win_length * 2: - raise Exception(f"x must be bigger than {self.win_length * 2}") - - assert xn is None or xn.ndim == 1 or xn.ndim == 2 - if xn is not None and xn.shape[-1] < self.win_length * 2: - raise Exception(f"xn must be bigger than {self.win_length * 2}") - - # Compute short-time Fourier transform (STFT) - X = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(x.device), - ) - - # Compute signal mask based on stationary or nonstationary assumptions - if self.nonstationary: - sig_mask = self._nonstationary_mask(X.abs()) - else: - sig_mask = self._stationary_mask(amp_to_db(X), xn) - - # Propagate decrease in signal power - sig_mask = self.prop_decrease * (sig_mask * 1.0 - 1.0) + 1.0 - - # Smooth signal mask with 2D convolution - if self.smoothing_filter is not None: - sig_mask = conv2d( - sig_mask.unsqueeze(1), - self.smoothing_filter.to(sig_mask.dtype), - padding="same", - ) - - # Apply signal mask to STFT magnitude and phase components - Y = X * sig_mask.squeeze(1) - - # Inverse STFT to obtain time-domain signal - y = torch.istft( - Y, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - center=True, - window=torch.hann_window(self.win_length).to(Y.device), - ) - - return y.to(dtype=x.dtype) diff --git a/spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/app.py b/spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/app.py deleted file mode 100644 index bdd26e83f8423fa760b4260a28e22e891a9ee55b..0000000000000000000000000000000000000000 --- a/spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/cardiffnlp/twitter-roberta-base-sentiment-latest").launch() \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py deleted file mode 100644 index 7eb56ec514bff822ba1a19a6474207ed82492410..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch - - -def squeeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - t = (t // n_sqz) * n_sqz - x = x[:, :, :t] - x_sqz = x.view(b, c, t // n_sqz, n_sqz) - x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) - - if x_mask is not None: - x_mask = x_mask[:, :, n_sqz - 1::n_sqz] - else: - x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) - return x_sqz * x_mask, x_mask - - -def unsqueeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - x_unsqz = x.view(b, n_sqz, c // n_sqz, t) - x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) - - if x_mask is not None: - x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) - else: - x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) - return x_unsqz * x_mask, x_mask diff --git a/spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/main.py b/spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/main.py deleted file mode 100644 index 358293521706ff525f6f1b1274085a08236394ff..0000000000000000000000000000000000000000 --- 
a/spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/main.py +++ /dev/null @@ -1,378 +0,0 @@ -import os -import sys -sys.path.insert(1, os.path.join(sys.path[0], '../utils')) -import numpy as np -import argparse -import time -import logging - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch.utils.data - -from utilities import (create_folder, get_filename, create_logging, Mixup, - StatisticsContainer) -from models import (PVT, PVT2, PVT_lr, PVT_nopretrain, PVT_2layer, Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, - Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, - Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, - Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, - Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, - Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn6_Transformer, GLAM, GLAM2, GLAM3, Cnn4, EAT) -#from models_test import (PVT_test) -#from models1 import (PVT1) -#from models_vig import (VIG, VIG2) -#from models_vvt import (VVT) -#from models2 import (MPVIT, MPVIT2) -#from models_reshape import (PVT_reshape, PVT_tscam) -#from models_swin import (Swin, Swin_nopretrain) -#from models_swin2 import (Swin2) -#from models_van import (Van, Van_tiny) -#from models_focal import (Focal) -#from models_cross import (Cross) -#from models_cov import (Cov) -#from models_cnn import (Cnn_light) -#from models_twins import (Twins) -#from models_cmt import (Cmt, Cmt1) -#from models_shunted import (Shunted) -#from models_quadtree import (Quadtree, Quadtree2, Quadtree_nopretrain) -#from models_davit import (Davit_tscam, Davit, Davit_nopretrain) -from pytorch_utils import (move_data_to_device, count_parameters, count_flops, - do_mixup) -from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, - AlternateTrainSampler, EvaluateSampler, collate_fn) -from evaluate import Evaluator -import config -from losses import get_loss_func - - -def train(args): - """Train AudioSet tagging model. 
- - Args: - dataset_dir: str - workspace: str - data_type: 'balanced_train' | 'full_train' - window_size: int - hop_size: int - mel_bins: int - model_type: str - loss_type: 'clip_bce' - balanced: 'none' | 'balanced' | 'alternate' - augmentation: 'none' | 'mixup' - batch_size: int - learning_rate: float - resume_iteration: int - early_stop: int - accumulation_steps: int - cuda: bool - """ - - # Arugments & parameters - workspace = args.workspace - data_type = args.data_type - sample_rate = args.sample_rate - window_size = args.window_size - hop_size = args.hop_size - mel_bins = args.mel_bins - fmin = args.fmin - fmax = args.fmax - model_type = args.model_type - loss_type = args.loss_type - balanced = args.balanced - augmentation = args.augmentation - batch_size = args.batch_size - learning_rate = args.learning_rate - resume_iteration = args.resume_iteration - early_stop = args.early_stop - device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu') - filename = args.filename - - num_workers = 8 - clip_samples = config.clip_samples - classes_num = config.classes_num - loss_func = get_loss_func(loss_type) - - # Paths - black_list_csv = None - - train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes', - '{}.h5'.format(data_type)) - - eval_bal_indexes_hdf5_path = os.path.join(workspace, - 'hdf5s', 'indexes', 'balanced_train.h5') - - eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes', - 'eval.h5') - - checkpoints_dir = os.path.join(workspace, 'checkpoints', filename, - 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( - sample_rate, window_size, hop_size, mel_bins, fmin, fmax), - 'data_type={}'.format(data_type), model_type, - 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), - 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size)) - create_folder(checkpoints_dir) - - statistics_path = os.path.join(workspace, 'statistics', filename, - 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( - sample_rate, window_size, hop_size, mel_bins, fmin, fmax), - 'data_type={}'.format(data_type), model_type, - 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), - 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size), - 'statistics.pkl') - create_folder(os.path.dirname(statistics_path)) - - logs_dir = os.path.join(workspace, 'logs', filename, - 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( - sample_rate, window_size, hop_size, mel_bins, fmin, fmax), - 'data_type={}'.format(data_type), model_type, - 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), - 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size)) - - create_logging(logs_dir, filemode='w') - logging.info(args) - - if 'cuda' in str(device): - logging.info('Using GPU.') - device = 'cuda' - else: - logging.info('Using CPU. 
Set --cuda flag to use GPU.') - device = 'cpu' - - # Model - Model = eval(model_type) - model = Model(sample_rate=sample_rate, window_size=window_size, - hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax, - classes_num=classes_num) - total = sum(p.numel() for p in model.parameters()) - print("Total params: %.2fM" % (total/1e6)) - logging.info("Total params: %.2fM" % (total/1e6)) - #params_num = count_parameters(model) - # flops_num = count_flops(model, clip_samples) - #logging.info('Parameters num: {}'.format(params_num)) - # logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9)) - - # Dataset will be used by DataLoader later. Dataset takes a meta as input - # and return a waveform and a target. - dataset = AudioSetDataset(sample_rate=sample_rate) - - # Train sampler - if balanced == 'none': - Sampler = TrainSampler - elif balanced == 'balanced': - Sampler = BalancedTrainSampler - elif balanced == 'alternate': - Sampler = AlternateTrainSampler - - train_sampler = Sampler( - indexes_hdf5_path=train_indexes_hdf5_path, - batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size, - black_list_csv=black_list_csv) - - # Evaluate sampler - eval_bal_sampler = EvaluateSampler( - indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size) - - eval_test_sampler = EvaluateSampler( - indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size) - - # Data loader - train_loader = torch.utils.data.DataLoader(dataset=dataset, - batch_sampler=train_sampler, collate_fn=collate_fn, - num_workers=num_workers, pin_memory=True) - - eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset, - batch_sampler=eval_bal_sampler, collate_fn=collate_fn, - num_workers=num_workers, pin_memory=True) - - eval_test_loader = torch.utils.data.DataLoader(dataset=dataset, - batch_sampler=eval_test_sampler, collate_fn=collate_fn, - num_workers=num_workers, pin_memory=True) - mix=0.5 - if 'mixup' in augmentation: - mixup_augmenter = Mixup(mixup_alpha=mix) - print(mix) - logging.info(mix) - - # Evaluator - evaluator = Evaluator(model=model) - - # Statistics - statistics_container = StatisticsContainer(statistics_path) - - # Optimizer - optimizer = optim.AdamW(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.05, amsgrad=True) - scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, min_lr=1e-06, verbose=True) - train_bgn_time = time.time() - - # Resume training - if resume_iteration > 0: - resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename, - 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( - sample_rate, window_size, hop_size, mel_bins, fmin, fmax), - 'data_type={}'.format(data_type), model_type, - 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), - 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size), - '{}_iterations.pth'.format(resume_iteration)) - - logging.info('Loading checkpoint {}'.format(resume_checkpoint_path)) - checkpoint = torch.load(resume_checkpoint_path) - model.load_state_dict(checkpoint['model']) - train_sampler.load_state_dict(checkpoint['sampler']) - statistics_container.load_state_dict(resume_iteration) - iteration = checkpoint['iteration'] - - else: - iteration = 0 - - # Parallel - print('GPU number: {}'.format(torch.cuda.device_count())) - model = torch.nn.DataParallel(model) - - if 'cuda' in str(device): - model.to(device) - - if resume_iteration: - 
optimizer.load_state_dict(checkpoint['optimizer']) - scheduler.load_state_dict(checkpoint['scheduler']) - print(optimizer.state_dict()['param_groups'][0]['lr']) - - time1 = time.time() - - for batch_data_dict in train_loader: - """batch_data_dict: { - 'audio_name': (batch_size [*2 if mixup],), - 'waveform': (batch_size [*2 if mixup], clip_samples), - 'target': (batch_size [*2 if mixup], classes_num), - (ifexist) 'mixup_lambda': (batch_size * 2,)} - """ - - # Evaluate - if (iteration % 2000 == 0 and iteration >= resume_iteration) or (iteration == 0): - train_fin_time = time.time() - - bal_statistics = evaluator.evaluate(eval_bal_loader) - test_statistics = evaluator.evaluate(eval_test_loader) - - logging.info('Validate bal mAP: {:.3f}'.format( - np.mean(bal_statistics['average_precision']))) - - logging.info('Validate test mAP: {:.3f}'.format( - np.mean(test_statistics['average_precision']))) - - statistics_container.append(iteration, bal_statistics, data_type='bal') - statistics_container.append(iteration, test_statistics, data_type='test') - statistics_container.dump() - - train_time = train_fin_time - train_bgn_time - validate_time = time.time() - train_fin_time - - logging.info( - 'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s' - ''.format(iteration, train_time, validate_time)) - - logging.info('------------------------------------') - - train_bgn_time = time.time() - - # Save model - if iteration % 2000 == 0: - checkpoint = { - 'iteration': iteration, - 'model': model.module.state_dict(), - 'sampler': train_sampler.state_dict(), - 'optimizer': optimizer.state_dict(), - 'scheduler': scheduler.state_dict()} - - checkpoint_path = os.path.join( - checkpoints_dir, '{}_iterations.pth'.format(iteration)) - - torch.save(checkpoint, checkpoint_path) - logging.info('Model saved to {}'.format(checkpoint_path)) - - # Mixup lambda - if 'mixup' in augmentation: - batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda( - batch_size=len(batch_data_dict['waveform'])) - - # Move data to device - for key in batch_data_dict.keys(): - batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device) - - # Forward - model.train() - - if 'mixup' in augmentation: - batch_output_dict = model(batch_data_dict['waveform'], - batch_data_dict['mixup_lambda']) - """{'clipwise_output': (batch_size, classes_num), ...}""" - - batch_target_dict = {'target': do_mixup(batch_data_dict['target'], - batch_data_dict['mixup_lambda'])} - """{'target': (batch_size, classes_num)}""" - else: - batch_output_dict = model(batch_data_dict['waveform'], None) - """{'clipwise_output': (batch_size, classes_num), ...}""" - - batch_target_dict = {'target': batch_data_dict['target']} - """{'target': (batch_size, classes_num)}""" - - # Loss - loss = loss_func(batch_output_dict, batch_target_dict) - # Backward - loss.backward() - - optimizer.step() - optimizer.zero_grad() - - if iteration % 10 == 0: - print(iteration, loss) - #print('--- Iteration: {}, train time: {:.3f} s / 10 iterations ---'\ - # .format(iteration, time.time() - time1)) - #time1 = time.time() - - if iteration % 2000 == 0: - scheduler.step(np.mean(test_statistics['average_precision'])) - print(optimizer.state_dict()['param_groups'][0]['lr']) - logging.info(optimizer.state_dict()['param_groups'][0]['lr']) - - # Stop learning - if iteration == early_stop: - break - - iteration += 1 - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser(description='Example of parser. 
') - subparsers = parser.add_subparsers(dest='mode') - - parser_train = subparsers.add_parser('train') - parser_train.add_argument('--workspace', type=str, required=True) - parser_train.add_argument('--data_type', type=str, default='full_train', choices=['balanced_train', 'full_train']) - parser_train.add_argument('--sample_rate', type=int, default=32000) - parser_train.add_argument('--window_size', type=int, default=1024) - parser_train.add_argument('--hop_size', type=int, default=320) - parser_train.add_argument('--mel_bins', type=int, default=64) - parser_train.add_argument('--fmin', type=int, default=50) - parser_train.add_argument('--fmax', type=int, default=14000) - parser_train.add_argument('--model_type', type=str, required=True) - parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce']) - parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate']) - parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup']) - parser_train.add_argument('--batch_size', type=int, default=32) - parser_train.add_argument('--learning_rate', type=float, default=1e-3) - parser_train.add_argument('--resume_iteration', type=int, default=0) - parser_train.add_argument('--early_stop', type=int, default=1000000) - parser_train.add_argument('--cuda', action='store_true', default=False) - - args = parser.parse_args() - args.filename = get_filename(__file__) - - if args.mode == 'train': - train(args) - - else: - raise Exception('Error argument!') \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/inference_waveform.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/inference_waveform.py deleted file mode 100644 index 714d30b260b85beb30581b31f5b2c0005a9fe6b5..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/inference_waveform.py +++ /dev/null @@ -1,102 +0,0 @@ -import sys -import os -import librosa -import numpy as np -import torch -import audio_to_text.captioning.models -import audio_to_text.captioning.models.encoder -import audio_to_text.captioning.models.decoder -import audio_to_text.captioning.utils.train_util as train_util - - -def load_model(config, checkpoint): - ckpt = torch.load(checkpoint, "cpu") - encoder_cfg = config["model"]["encoder"] - encoder = train_util.init_obj( - audio_to_text.captioning.models.encoder, - encoder_cfg - ) - if "pretrained" in encoder_cfg: - pretrained = encoder_cfg["pretrained"] - train_util.load_pretrained_model(encoder, - pretrained, - sys.stdout.write) - decoder_cfg = config["model"]["decoder"] - if "vocab_size" not in decoder_cfg["args"]: - decoder_cfg["args"]["vocab_size"] = len(ckpt["vocabulary"]) - decoder = train_util.init_obj( - audio_to_text.captioning.models.decoder, - decoder_cfg - ) - if "word_embedding" in decoder_cfg: - decoder.load_word_embedding(**decoder_cfg["word_embedding"]) - if "pretrained" in decoder_cfg: - pretrained = decoder_cfg["pretrained"] - train_util.load_pretrained_model(decoder, - pretrained, - sys.stdout.write) - model = train_util.init_obj(audio_to_text.captioning.models, config["model"], - encoder=encoder, decoder=decoder) - train_util.load_pretrained_model(model, ckpt) - model.eval() - return { - "model": model, - "vocabulary": ckpt["vocabulary"] - } - - -def decode_caption(word_ids, vocabulary): - candidate = [] - for word_id in word_ids: - word = vocabulary[word_id] - if word == "": - break - elif word == "": - continue - candidate.append(word) - 
candidate = " ".join(candidate) - return candidate - - -class AudioCapModel(object): - def __init__(self,weight_dir,device='cpu'): - config = os.path.join(weight_dir,'config.yaml') - self.config = train_util.parse_config_or_kwargs(config) - checkpoint = os.path.join(weight_dir,'swa.pth') - resumed = load_model(self.config, checkpoint) - model = resumed["model"] - self.vocabulary = resumed["vocabulary"] - self.model = model.to(device) - self.device = device - - def caption(self,audio_list): - if isinstance(audio_list,np.ndarray): - audio_list = [audio_list] - elif isinstance(audio_list,str): - audio_list = [librosa.load(audio_list,sr=32000)[0]] - - captions = [] - for wav in audio_list: - inputwav = torch.as_tensor(wav).float().unsqueeze(0).to(self.device) - wav_len = torch.LongTensor([len(wav)]) - input_dict = { - "mode": "inference", - "wav": inputwav, - "wav_len": wav_len, - "specaug": False, - "sample_method": "beam", - } - print(input_dict) - out_dict = self.model(input_dict) - caption_batch = [decode_caption(seq, self.vocabulary) for seq in \ - out_dict["seq"].cpu().numpy()] - captions.extend(caption_batch) - return captions - - - - def __call__(self, audio_list): - return self.caption(audio_list) - - - diff --git a/spaces/AIWaves/SOP_Generation-single/gen_utils.py b/spaces/AIWaves/SOP_Generation-single/gen_utils.py deleted file mode 100644 index c6067a8c827ee3d131699fec6dfb7475b86df4ae..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/SOP_Generation-single/gen_utils.py +++ /dev/null @@ -1,43 +0,0 @@ -def get_agents(design_states): - final_agents = {} - role = design_states[0]["role"] - style = design_states[0]["style"] - agent_name = "_".join(role.split(" ")) - final_agents[agent_name] = {"style":style,"roles":{}} - final_agents["User"] = {"style":"","roles":{}} - for design_state in design_states: - final_agents[agent_name]["roles"][design_state["state_name"]] = agent_name - final_agents["User"]["roles"][design_state["state_name"]] = "User" - return final_agents - -def get_relations(design_states): - relations = {} - n = len(design_states) - for i in range(n): - relations[design_states[i]["state_name"]] = {} - relations[design_states[i]["state_name"]]["0"] = design_states[i]["state_name"] - relations[design_states[i]["state_name"]]["1"] = design_states[i+1]["state_name"] if i!=n-1 else "end_state" - return relations - - -def gen_states(design_states): - states = {"end_state":{ - "agent_states":{} - }} - for design_state in design_states: - state_name = design_state["state_name"] - role = design_state["role"] - agent_name = "_".join(role.split(" ")) - states[state_name] = {"controller":{"controller_type": "order", "max_chat_nums" : 1000,"judge_system_prompt":design_state["judge"],"judge_last_prompt":"Please contact the above to extract and . Do not perform additional output. Please strictly follow the above format for output! 
Remember, please strictly follow the above format for output!"}} - states[state_name]["agent_states"] = { - agent_name : { - "role" : {"role" : role}, - "task" : {"task" : design_state["task"]}, - "rule" : {"rule" : design_state["rule"]} - }, - "User" : { - } - } - - return states - diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py deleted file mode 100644 index 262b21d98e79bc27dd993bd2a09e91a1dc66db0e..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py +++ /dev/null @@ -1,2861 +0,0 @@ -default_scope = 'mmpose' -default_hooks = dict( - timer=dict(type='IterTimerHook'), - logger=dict(type='LoggerHook', interval=50), - param_scheduler=dict(type='ParamSchedulerHook'), - checkpoint=dict( - type='CheckpointHook', interval=10, save_best='PCK', rule='greater'), - sampler_seed=dict(type='DistSamplerSeedHook'), - visualization=dict(type='PoseVisualizationHook', enable=False)) -custom_hooks = [dict(type='SyncBuffersHook')] -env_cfg = dict( - cudnn_benchmark=False, - mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), - dist_cfg=dict(backend='nccl')) -vis_backends = [dict(type='LocalVisBackend')] -visualizer = dict( - type='PoseLocalVisualizer', - vis_backends=[dict(type='LocalVisBackend'), - dict(type='WandbVisBackend')], - name='visualizer') -log_processor = dict( - type='LogProcessor', window_size=50, by_epoch=True, num_digits=6) -log_level = 'INFO' -load_from = None -resume = False -backend_args = dict(backend='local') -train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10) -val_cfg = dict() -test_cfg = dict() -colors = dict( - sss=[255, 128, 0], - lss=[255, 0, 128], - sso=[128, 0, 255], - lso=[0, 128, 255], - vest=[0, 128, 128], - sling=[0, 0, 128], - shorts=[128, 128, 128], - trousers=[128, 0, 128], - skirt=[64, 128, 128], - ssd=[64, 64, 128], - lsd=[128, 64, 0], - vd=[128, 64, 255], - sd=[128, 64, 0]) -dataset_info = dict( - dataset_name='deepfashion2', - paper_info=dict( - author= - 'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo', - title= - 'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images', - container= - 'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)', - year='2019', - homepage='https://github.com/switchablenorms/DeepFashion2'), - keypoint_info=dict({ - 0: - dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''), - 1: - dict( - name='sss_kpt2', - id=1, - color=[255, 128, 0], - type='', - swap='sss_kpt6'), - 2: - dict( - name='sss_kpt3', - id=2, - color=[255, 128, 0], - type='', - swap='sss_kpt5'), - 3: - dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''), - 4: - dict( - name='sss_kpt5', - id=4, - color=[255, 128, 0], - type='', - swap='sss_kpt3'), - 5: - dict( - name='sss_kpt6', - id=5, - color=[255, 128, 0], - type='', - swap='sss_kpt2'), - 6: - dict( - name='sss_kpt7', - id=6, - color=[255, 128, 0], - type='', - swap='sss_kpt25'), - 7: - dict( - 
name='sss_kpt8', - id=7, - color=[255, 128, 0], - type='', - swap='sss_kpt24'), - 8: - dict( - name='sss_kpt9', - id=8, - color=[255, 128, 0], - type='', - swap='sss_kpt23'), - 9: - dict( - name='sss_kpt10', - id=9, - color=[255, 128, 0], - type='', - swap='sss_kpt22'), - 10: - dict( - name='sss_kpt11', - id=10, - color=[255, 128, 0], - type='', - swap='sss_kpt21'), - 11: - dict( - name='sss_kpt12', - id=11, - color=[255, 128, 0], - type='', - swap='sss_kpt20'), - 12: - dict( - name='sss_kpt13', - id=12, - color=[255, 128, 0], - type='', - swap='sss_kpt19'), - 13: - dict( - name='sss_kpt14', - id=13, - color=[255, 128, 0], - type='', - swap='sss_kpt18'), - 14: - dict( - name='sss_kpt15', - id=14, - color=[255, 128, 0], - type='', - swap='sss_kpt17'), - 15: - dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''), - 16: - dict( - name='sss_kpt17', - id=16, - color=[255, 128, 0], - type='', - swap='sss_kpt15'), - 17: - dict( - name='sss_kpt18', - id=17, - color=[255, 128, 0], - type='', - swap='sss_kpt14'), - 18: - dict( - name='sss_kpt19', - id=18, - color=[255, 128, 0], - type='', - swap='sss_kpt13'), - 19: - dict( - name='sss_kpt20', - id=19, - color=[255, 128, 0], - type='', - swap='sss_kpt12'), - 20: - dict( - name='sss_kpt21', - id=20, - color=[255, 128, 0], - type='', - swap='sss_kpt11'), - 21: - dict( - name='sss_kpt22', - id=21, - color=[255, 128, 0], - type='', - swap='sss_kpt10'), - 22: - dict( - name='sss_kpt23', - id=22, - color=[255, 128, 0], - type='', - swap='sss_kpt9'), - 23: - dict( - name='sss_kpt24', - id=23, - color=[255, 128, 0], - type='', - swap='sss_kpt8'), - 24: - dict( - name='sss_kpt25', - id=24, - color=[255, 128, 0], - type='', - swap='sss_kpt7'), - 25: - dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''), - 26: - dict( - name='lss_kpt2', - id=26, - color=[255, 0, 128], - type='', - swap='lss_kpt6'), - 27: - dict( - name='lss_kpt3', - id=27, - color=[255, 0, 128], - type='', - swap='lss_kpt5'), - 28: - dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''), - 29: - dict( - name='lss_kpt5', - id=29, - color=[255, 0, 128], - type='', - swap='lss_kpt3'), - 30: - dict( - name='lss_kpt6', - id=30, - color=[255, 0, 128], - type='', - swap='lss_kpt2'), - 31: - dict( - name='lss_kpt7', - id=31, - color=[255, 0, 128], - type='', - swap='lss_kpt33'), - 32: - dict( - name='lss_kpt8', - id=32, - color=[255, 0, 128], - type='', - swap='lss_kpt32'), - 33: - dict( - name='lss_kpt9', - id=33, - color=[255, 0, 128], - type='', - swap='lss_kpt31'), - 34: - dict( - name='lss_kpt10', - id=34, - color=[255, 0, 128], - type='', - swap='lss_kpt30'), - 35: - dict( - name='lss_kpt11', - id=35, - color=[255, 0, 128], - type='', - swap='lss_kpt29'), - 36: - dict( - name='lss_kpt12', - id=36, - color=[255, 0, 128], - type='', - swap='lss_kpt28'), - 37: - dict( - name='lss_kpt13', - id=37, - color=[255, 0, 128], - type='', - swap='lss_kpt27'), - 38: - dict( - name='lss_kpt14', - id=38, - color=[255, 0, 128], - type='', - swap='lss_kpt26'), - 39: - dict( - name='lss_kpt15', - id=39, - color=[255, 0, 128], - type='', - swap='lss_kpt25'), - 40: - dict( - name='lss_kpt16', - id=40, - color=[255, 0, 128], - type='', - swap='lss_kpt24'), - 41: - dict( - name='lss_kpt17', - id=41, - color=[255, 0, 128], - type='', - swap='lss_kpt23'), - 42: - dict( - name='lss_kpt18', - id=42, - color=[255, 0, 128], - type='', - swap='lss_kpt22'), - 43: - dict( - name='lss_kpt19', - id=43, - color=[255, 0, 128], - type='', - swap='lss_kpt21'), - 44: - 
dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''), - 45: - dict( - name='lss_kpt21', - id=45, - color=[255, 0, 128], - type='', - swap='lss_kpt19'), - 46: - dict( - name='lss_kpt22', - id=46, - color=[255, 0, 128], - type='', - swap='lss_kpt18'), - 47: - dict( - name='lss_kpt23', - id=47, - color=[255, 0, 128], - type='', - swap='lss_kpt17'), - 48: - dict( - name='lss_kpt24', - id=48, - color=[255, 0, 128], - type='', - swap='lss_kpt16'), - 49: - dict( - name='lss_kpt25', - id=49, - color=[255, 0, 128], - type='', - swap='lss_kpt15'), - 50: - dict( - name='lss_kpt26', - id=50, - color=[255, 0, 128], - type='', - swap='lss_kpt14'), - 51: - dict( - name='lss_kpt27', - id=51, - color=[255, 0, 128], - type='', - swap='lss_kpt13'), - 52: - dict( - name='lss_kpt28', - id=52, - color=[255, 0, 128], - type='', - swap='lss_kpt12'), - 53: - dict( - name='lss_kpt29', - id=53, - color=[255, 0, 128], - type='', - swap='lss_kpt11'), - 54: - dict( - name='lss_kpt30', - id=54, - color=[255, 0, 128], - type='', - swap='lss_kpt10'), - 55: - dict( - name='lss_kpt31', - id=55, - color=[255, 0, 128], - type='', - swap='lss_kpt9'), - 56: - dict( - name='lss_kpt32', - id=56, - color=[255, 0, 128], - type='', - swap='lss_kpt8'), - 57: - dict( - name='lss_kpt33', - id=57, - color=[255, 0, 128], - type='', - swap='lss_kpt7'), - 58: - dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''), - 59: - dict( - name='sso_kpt2', - id=59, - color=[128, 0, 255], - type='', - swap='sso_kpt26'), - 60: - dict( - name='sso_kpt3', - id=60, - color=[128, 0, 255], - type='', - swap='sso_kpt5'), - 61: - dict( - name='sso_kpt4', - id=61, - color=[128, 0, 255], - type='', - swap='sso_kpt6'), - 62: - dict( - name='sso_kpt5', - id=62, - color=[128, 0, 255], - type='', - swap='sso_kpt3'), - 63: - dict( - name='sso_kpt6', - id=63, - color=[128, 0, 255], - type='', - swap='sso_kpt4'), - 64: - dict( - name='sso_kpt7', - id=64, - color=[128, 0, 255], - type='', - swap='sso_kpt25'), - 65: - dict( - name='sso_kpt8', - id=65, - color=[128, 0, 255], - type='', - swap='sso_kpt24'), - 66: - dict( - name='sso_kpt9', - id=66, - color=[128, 0, 255], - type='', - swap='sso_kpt23'), - 67: - dict( - name='sso_kpt10', - id=67, - color=[128, 0, 255], - type='', - swap='sso_kpt22'), - 68: - dict( - name='sso_kpt11', - id=68, - color=[128, 0, 255], - type='', - swap='sso_kpt21'), - 69: - dict( - name='sso_kpt12', - id=69, - color=[128, 0, 255], - type='', - swap='sso_kpt20'), - 70: - dict( - name='sso_kpt13', - id=70, - color=[128, 0, 255], - type='', - swap='sso_kpt19'), - 71: - dict( - name='sso_kpt14', - id=71, - color=[128, 0, 255], - type='', - swap='sso_kpt18'), - 72: - dict( - name='sso_kpt15', - id=72, - color=[128, 0, 255], - type='', - swap='sso_kpt17'), - 73: - dict( - name='sso_kpt16', - id=73, - color=[128, 0, 255], - type='', - swap='sso_kpt29'), - 74: - dict( - name='sso_kpt17', - id=74, - color=[128, 0, 255], - type='', - swap='sso_kpt15'), - 75: - dict( - name='sso_kpt18', - id=75, - color=[128, 0, 255], - type='', - swap='sso_kpt14'), - 76: - dict( - name='sso_kpt19', - id=76, - color=[128, 0, 255], - type='', - swap='sso_kpt13'), - 77: - dict( - name='sso_kpt20', - id=77, - color=[128, 0, 255], - type='', - swap='sso_kpt12'), - 78: - dict( - name='sso_kpt21', - id=78, - color=[128, 0, 255], - type='', - swap='sso_kpt11'), - 79: - dict( - name='sso_kpt22', - id=79, - color=[128, 0, 255], - type='', - swap='sso_kpt10'), - 80: - dict( - name='sso_kpt23', - id=80, - color=[128, 0, 255], - type='', - 
swap='sso_kpt9'), - 81: - dict( - name='sso_kpt24', - id=81, - color=[128, 0, 255], - type='', - swap='sso_kpt8'), - 82: - dict( - name='sso_kpt25', - id=82, - color=[128, 0, 255], - type='', - swap='sso_kpt7'), - 83: - dict( - name='sso_kpt26', - id=83, - color=[128, 0, 255], - type='', - swap='sso_kpt2'), - 84: - dict( - name='sso_kpt27', - id=84, - color=[128, 0, 255], - type='', - swap='sso_kpt30'), - 85: - dict( - name='sso_kpt28', - id=85, - color=[128, 0, 255], - type='', - swap='sso_kpt31'), - 86: - dict( - name='sso_kpt29', - id=86, - color=[128, 0, 255], - type='', - swap='sso_kpt16'), - 87: - dict( - name='sso_kpt30', - id=87, - color=[128, 0, 255], - type='', - swap='sso_kpt27'), - 88: - dict( - name='sso_kpt31', - id=88, - color=[128, 0, 255], - type='', - swap='sso_kpt28'), - 89: - dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''), - 90: - dict( - name='lso_kpt2', - id=90, - color=[0, 128, 255], - type='', - swap='lso_kpt6'), - 91: - dict( - name='lso_kpt3', - id=91, - color=[0, 128, 255], - type='', - swap='lso_kpt5'), - 92: - dict( - name='lso_kpt4', - id=92, - color=[0, 128, 255], - type='', - swap='lso_kpt34'), - 93: - dict( - name='lso_kpt5', - id=93, - color=[0, 128, 255], - type='', - swap='lso_kpt3'), - 94: - dict( - name='lso_kpt6', - id=94, - color=[0, 128, 255], - type='', - swap='lso_kpt2'), - 95: - dict( - name='lso_kpt7', - id=95, - color=[0, 128, 255], - type='', - swap='lso_kpt33'), - 96: - dict( - name='lso_kpt8', - id=96, - color=[0, 128, 255], - type='', - swap='lso_kpt32'), - 97: - dict( - name='lso_kpt9', - id=97, - color=[0, 128, 255], - type='', - swap='lso_kpt31'), - 98: - dict( - name='lso_kpt10', - id=98, - color=[0, 128, 255], - type='', - swap='lso_kpt30'), - 99: - dict( - name='lso_kpt11', - id=99, - color=[0, 128, 255], - type='', - swap='lso_kpt29'), - 100: - dict( - name='lso_kpt12', - id=100, - color=[0, 128, 255], - type='', - swap='lso_kpt28'), - 101: - dict( - name='lso_kpt13', - id=101, - color=[0, 128, 255], - type='', - swap='lso_kpt27'), - 102: - dict( - name='lso_kpt14', - id=102, - color=[0, 128, 255], - type='', - swap='lso_kpt26'), - 103: - dict( - name='lso_kpt15', - id=103, - color=[0, 128, 255], - type='', - swap='lso_kpt25'), - 104: - dict( - name='lso_kpt16', - id=104, - color=[0, 128, 255], - type='', - swap='lso_kpt24'), - 105: - dict( - name='lso_kpt17', - id=105, - color=[0, 128, 255], - type='', - swap='lso_kpt23'), - 106: - dict( - name='lso_kpt18', - id=106, - color=[0, 128, 255], - type='', - swap='lso_kpt22'), - 107: - dict( - name='lso_kpt19', - id=107, - color=[0, 128, 255], - type='', - swap='lso_kpt21'), - 108: - dict( - name='lso_kpt20', - id=108, - color=[0, 128, 255], - type='', - swap='lso_kpt37'), - 109: - dict( - name='lso_kpt21', - id=109, - color=[0, 128, 255], - type='', - swap='lso_kpt19'), - 110: - dict( - name='lso_kpt22', - id=110, - color=[0, 128, 255], - type='', - swap='lso_kpt18'), - 111: - dict( - name='lso_kpt23', - id=111, - color=[0, 128, 255], - type='', - swap='lso_kpt17'), - 112: - dict( - name='lso_kpt24', - id=112, - color=[0, 128, 255], - type='', - swap='lso_kpt16'), - 113: - dict( - name='lso_kpt25', - id=113, - color=[0, 128, 255], - type='', - swap='lso_kpt15'), - 114: - dict( - name='lso_kpt26', - id=114, - color=[0, 128, 255], - type='', - swap='lso_kpt14'), - 115: - dict( - name='lso_kpt27', - id=115, - color=[0, 128, 255], - type='', - swap='lso_kpt13'), - 116: - dict( - name='lso_kpt28', - id=116, - color=[0, 128, 255], - type='', - swap='lso_kpt12'), - 117: - 
dict( - name='lso_kpt29', - id=117, - color=[0, 128, 255], - type='', - swap='lso_kpt11'), - 118: - dict( - name='lso_kpt30', - id=118, - color=[0, 128, 255], - type='', - swap='lso_kpt10'), - 119: - dict( - name='lso_kpt31', - id=119, - color=[0, 128, 255], - type='', - swap='lso_kpt9'), - 120: - dict( - name='lso_kpt32', - id=120, - color=[0, 128, 255], - type='', - swap='lso_kpt8'), - 121: - dict( - name='lso_kpt33', - id=121, - color=[0, 128, 255], - type='', - swap='lso_kpt7'), - 122: - dict( - name='lso_kpt34', - id=122, - color=[0, 128, 255], - type='', - swap='lso_kpt4'), - 123: - dict( - name='lso_kpt35', - id=123, - color=[0, 128, 255], - type='', - swap='lso_kpt38'), - 124: - dict( - name='lso_kpt36', - id=124, - color=[0, 128, 255], - type='', - swap='lso_kpt39'), - 125: - dict( - name='lso_kpt37', - id=125, - color=[0, 128, 255], - type='', - swap='lso_kpt20'), - 126: - dict( - name='lso_kpt38', - id=126, - color=[0, 128, 255], - type='', - swap='lso_kpt35'), - 127: - dict( - name='lso_kpt39', - id=127, - color=[0, 128, 255], - type='', - swap='lso_kpt36'), - 128: - dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''), - 129: - dict( - name='vest_kpt2', - id=129, - color=[0, 128, 128], - type='', - swap='vest_kpt6'), - 130: - dict( - name='vest_kpt3', - id=130, - color=[0, 128, 128], - type='', - swap='vest_kpt5'), - 131: - dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''), - 132: - dict( - name='vest_kpt5', - id=132, - color=[0, 128, 128], - type='', - swap='vest_kpt3'), - 133: - dict( - name='vest_kpt6', - id=133, - color=[0, 128, 128], - type='', - swap='vest_kpt2'), - 134: - dict( - name='vest_kpt7', - id=134, - color=[0, 128, 128], - type='', - swap='vest_kpt15'), - 135: - dict( - name='vest_kpt8', - id=135, - color=[0, 128, 128], - type='', - swap='vest_kpt14'), - 136: - dict( - name='vest_kpt9', - id=136, - color=[0, 128, 128], - type='', - swap='vest_kpt13'), - 137: - dict( - name='vest_kpt10', - id=137, - color=[0, 128, 128], - type='', - swap='vest_kpt12'), - 138: - dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''), - 139: - dict( - name='vest_kpt12', - id=139, - color=[0, 128, 128], - type='', - swap='vest_kpt10'), - 140: - dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''), - 141: - dict( - name='vest_kpt14', - id=141, - color=[0, 128, 128], - type='', - swap='vest_kpt8'), - 142: - dict( - name='vest_kpt15', - id=142, - color=[0, 128, 128], - type='', - swap='vest_kpt7'), - 143: - dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''), - 144: - dict( - name='sling_kpt2', - id=144, - color=[0, 0, 128], - type='', - swap='sling_kpt6'), - 145: - dict( - name='sling_kpt3', - id=145, - color=[0, 0, 128], - type='', - swap='sling_kpt5'), - 146: - dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''), - 147: - dict( - name='sling_kpt5', - id=147, - color=[0, 0, 128], - type='', - swap='sling_kpt3'), - 148: - dict( - name='sling_kpt6', - id=148, - color=[0, 0, 128], - type='', - swap='sling_kpt2'), - 149: - dict( - name='sling_kpt7', - id=149, - color=[0, 0, 128], - type='', - swap='sling_kpt15'), - 150: - dict( - name='sling_kpt8', - id=150, - color=[0, 0, 128], - type='', - swap='sling_kpt14'), - 151: - dict( - name='sling_kpt9', - id=151, - color=[0, 0, 128], - type='', - swap='sling_kpt13'), - 152: - dict( - name='sling_kpt10', - id=152, - color=[0, 0, 128], - type='', - swap='sling_kpt12'), - 153: - dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', 
swap=''), - 154: - dict( - name='sling_kpt12', - id=154, - color=[0, 0, 128], - type='', - swap='sling_kpt10'), - 155: - dict( - name='sling_kpt13', - id=155, - color=[0, 0, 128], - type='', - swap='sling_kpt9'), - 156: - dict( - name='sling_kpt14', - id=156, - color=[0, 0, 128], - type='', - swap='sling_kpt8'), - 157: - dict( - name='sling_kpt15', - id=157, - color=[0, 0, 128], - type='', - swap='sling_kpt7'), - 158: - dict( - name='shorts_kpt1', - id=158, - color=[128, 128, 128], - type='', - swap='shorts_kpt3'), - 159: - dict( - name='shorts_kpt2', - id=159, - color=[128, 128, 128], - type='', - swap=''), - 160: - dict( - name='shorts_kpt3', - id=160, - color=[128, 128, 128], - type='', - swap='shorts_kpt1'), - 161: - dict( - name='shorts_kpt4', - id=161, - color=[128, 128, 128], - type='', - swap='shorts_kpt10'), - 162: - dict( - name='shorts_kpt5', - id=162, - color=[128, 128, 128], - type='', - swap='shorts_kpt9'), - 163: - dict( - name='shorts_kpt6', - id=163, - color=[128, 128, 128], - type='', - swap='shorts_kpt8'), - 164: - dict( - name='shorts_kpt7', - id=164, - color=[128, 128, 128], - type='', - swap=''), - 165: - dict( - name='shorts_kpt8', - id=165, - color=[128, 128, 128], - type='', - swap='shorts_kpt6'), - 166: - dict( - name='shorts_kpt9', - id=166, - color=[128, 128, 128], - type='', - swap='shorts_kpt5'), - 167: - dict( - name='shorts_kpt10', - id=167, - color=[128, 128, 128], - type='', - swap='shorts_kpt4'), - 168: - dict( - name='trousers_kpt1', - id=168, - color=[128, 0, 128], - type='', - swap='trousers_kpt3'), - 169: - dict( - name='trousers_kpt2', - id=169, - color=[128, 0, 128], - type='', - swap=''), - 170: - dict( - name='trousers_kpt3', - id=170, - color=[128, 0, 128], - type='', - swap='trousers_kpt1'), - 171: - dict( - name='trousers_kpt4', - id=171, - color=[128, 0, 128], - type='', - swap='trousers_kpt14'), - 172: - dict( - name='trousers_kpt5', - id=172, - color=[128, 0, 128], - type='', - swap='trousers_kpt13'), - 173: - dict( - name='trousers_kpt6', - id=173, - color=[128, 0, 128], - type='', - swap='trousers_kpt12'), - 174: - dict( - name='trousers_kpt7', - id=174, - color=[128, 0, 128], - type='', - swap='trousers_kpt11'), - 175: - dict( - name='trousers_kpt8', - id=175, - color=[128, 0, 128], - type='', - swap='trousers_kpt10'), - 176: - dict( - name='trousers_kpt9', - id=176, - color=[128, 0, 128], - type='', - swap=''), - 177: - dict( - name='trousers_kpt10', - id=177, - color=[128, 0, 128], - type='', - swap='trousers_kpt8'), - 178: - dict( - name='trousers_kpt11', - id=178, - color=[128, 0, 128], - type='', - swap='trousers_kpt7'), - 179: - dict( - name='trousers_kpt12', - id=179, - color=[128, 0, 128], - type='', - swap='trousers_kpt6'), - 180: - dict( - name='trousers_kpt13', - id=180, - color=[128, 0, 128], - type='', - swap='trousers_kpt5'), - 181: - dict( - name='trousers_kpt14', - id=181, - color=[128, 0, 128], - type='', - swap='trousers_kpt4'), - 182: - dict( - name='skirt_kpt1', - id=182, - color=[64, 128, 128], - type='', - swap='skirt_kpt3'), - 183: - dict( - name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''), - 184: - dict( - name='skirt_kpt3', - id=184, - color=[64, 128, 128], - type='', - swap='skirt_kpt1'), - 185: - dict( - name='skirt_kpt4', - id=185, - color=[64, 128, 128], - type='', - swap='skirt_kpt8'), - 186: - dict( - name='skirt_kpt5', - id=186, - color=[64, 128, 128], - type='', - swap='skirt_kpt7'), - 187: - dict( - name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''), - 188: - dict( - 
name='skirt_kpt7', - id=188, - color=[64, 128, 128], - type='', - swap='skirt_kpt5'), - 189: - dict( - name='skirt_kpt8', - id=189, - color=[64, 128, 128], - type='', - swap='skirt_kpt4'), - 190: - dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''), - 191: - dict( - name='ssd_kpt2', - id=191, - color=[64, 64, 128], - type='', - swap='ssd_kpt6'), - 192: - dict( - name='ssd_kpt3', - id=192, - color=[64, 64, 128], - type='', - swap='ssd_kpt5'), - 193: - dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''), - 194: - dict( - name='ssd_kpt5', - id=194, - color=[64, 64, 128], - type='', - swap='ssd_kpt3'), - 195: - dict( - name='ssd_kpt6', - id=195, - color=[64, 64, 128], - type='', - swap='ssd_kpt2'), - 196: - dict( - name='ssd_kpt7', - id=196, - color=[64, 64, 128], - type='', - swap='ssd_kpt29'), - 197: - dict( - name='ssd_kpt8', - id=197, - color=[64, 64, 128], - type='', - swap='ssd_kpt28'), - 198: - dict( - name='ssd_kpt9', - id=198, - color=[64, 64, 128], - type='', - swap='ssd_kpt27'), - 199: - dict( - name='ssd_kpt10', - id=199, - color=[64, 64, 128], - type='', - swap='ssd_kpt26'), - 200: - dict( - name='ssd_kpt11', - id=200, - color=[64, 64, 128], - type='', - swap='ssd_kpt25'), - 201: - dict( - name='ssd_kpt12', - id=201, - color=[64, 64, 128], - type='', - swap='ssd_kpt24'), - 202: - dict( - name='ssd_kpt13', - id=202, - color=[64, 64, 128], - type='', - swap='ssd_kpt23'), - 203: - dict( - name='ssd_kpt14', - id=203, - color=[64, 64, 128], - type='', - swap='ssd_kpt22'), - 204: - dict( - name='ssd_kpt15', - id=204, - color=[64, 64, 128], - type='', - swap='ssd_kpt21'), - 205: - dict( - name='ssd_kpt16', - id=205, - color=[64, 64, 128], - type='', - swap='ssd_kpt20'), - 206: - dict( - name='ssd_kpt17', - id=206, - color=[64, 64, 128], - type='', - swap='ssd_kpt19'), - 207: - dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''), - 208: - dict( - name='ssd_kpt19', - id=208, - color=[64, 64, 128], - type='', - swap='ssd_kpt17'), - 209: - dict( - name='ssd_kpt20', - id=209, - color=[64, 64, 128], - type='', - swap='ssd_kpt16'), - 210: - dict( - name='ssd_kpt21', - id=210, - color=[64, 64, 128], - type='', - swap='ssd_kpt15'), - 211: - dict( - name='ssd_kpt22', - id=211, - color=[64, 64, 128], - type='', - swap='ssd_kpt14'), - 212: - dict( - name='ssd_kpt23', - id=212, - color=[64, 64, 128], - type='', - swap='ssd_kpt13'), - 213: - dict( - name='ssd_kpt24', - id=213, - color=[64, 64, 128], - type='', - swap='ssd_kpt12'), - 214: - dict( - name='ssd_kpt25', - id=214, - color=[64, 64, 128], - type='', - swap='ssd_kpt11'), - 215: - dict( - name='ssd_kpt26', - id=215, - color=[64, 64, 128], - type='', - swap='ssd_kpt10'), - 216: - dict( - name='ssd_kpt27', - id=216, - color=[64, 64, 128], - type='', - swap='ssd_kpt9'), - 217: - dict( - name='ssd_kpt28', - id=217, - color=[64, 64, 128], - type='', - swap='ssd_kpt8'), - 218: - dict( - name='ssd_kpt29', - id=218, - color=[64, 64, 128], - type='', - swap='ssd_kpt7'), - 219: - dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''), - 220: - dict( - name='lsd_kpt2', - id=220, - color=[128, 64, 0], - type='', - swap='lsd_kpt6'), - 221: - dict( - name='lsd_kpt3', - id=221, - color=[128, 64, 0], - type='', - swap='lsd_kpt5'), - 222: - dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''), - 223: - dict( - name='lsd_kpt5', - id=223, - color=[128, 64, 0], - type='', - swap='lsd_kpt3'), - 224: - dict( - name='lsd_kpt6', - id=224, - color=[128, 64, 0], - type='', - 
swap='lsd_kpt2'), - 225: - dict( - name='lsd_kpt7', - id=225, - color=[128, 64, 0], - type='', - swap='lsd_kpt37'), - 226: - dict( - name='lsd_kpt8', - id=226, - color=[128, 64, 0], - type='', - swap='lsd_kpt36'), - 227: - dict( - name='lsd_kpt9', - id=227, - color=[128, 64, 0], - type='', - swap='lsd_kpt35'), - 228: - dict( - name='lsd_kpt10', - id=228, - color=[128, 64, 0], - type='', - swap='lsd_kpt34'), - 229: - dict( - name='lsd_kpt11', - id=229, - color=[128, 64, 0], - type='', - swap='lsd_kpt33'), - 230: - dict( - name='lsd_kpt12', - id=230, - color=[128, 64, 0], - type='', - swap='lsd_kpt32'), - 231: - dict( - name='lsd_kpt13', - id=231, - color=[128, 64, 0], - type='', - swap='lsd_kpt31'), - 232: - dict( - name='lsd_kpt14', - id=232, - color=[128, 64, 0], - type='', - swap='lsd_kpt30'), - 233: - dict( - name='lsd_kpt15', - id=233, - color=[128, 64, 0], - type='', - swap='lsd_kpt29'), - 234: - dict( - name='lsd_kpt16', - id=234, - color=[128, 64, 0], - type='', - swap='lsd_kpt28'), - 235: - dict( - name='lsd_kpt17', - id=235, - color=[128, 64, 0], - type='', - swap='lsd_kpt27'), - 236: - dict( - name='lsd_kpt18', - id=236, - color=[128, 64, 0], - type='', - swap='lsd_kpt26'), - 237: - dict( - name='lsd_kpt19', - id=237, - color=[128, 64, 0], - type='', - swap='lsd_kpt25'), - 238: - dict( - name='lsd_kpt20', - id=238, - color=[128, 64, 0], - type='', - swap='lsd_kpt24'), - 239: - dict( - name='lsd_kpt21', - id=239, - color=[128, 64, 0], - type='', - swap='lsd_kpt23'), - 240: - dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''), - 241: - dict( - name='lsd_kpt23', - id=241, - color=[128, 64, 0], - type='', - swap='lsd_kpt21'), - 242: - dict( - name='lsd_kpt24', - id=242, - color=[128, 64, 0], - type='', - swap='lsd_kpt20'), - 243: - dict( - name='lsd_kpt25', - id=243, - color=[128, 64, 0], - type='', - swap='lsd_kpt19'), - 244: - dict( - name='lsd_kpt26', - id=244, - color=[128, 64, 0], - type='', - swap='lsd_kpt18'), - 245: - dict( - name='lsd_kpt27', - id=245, - color=[128, 64, 0], - type='', - swap='lsd_kpt17'), - 246: - dict( - name='lsd_kpt28', - id=246, - color=[128, 64, 0], - type='', - swap='lsd_kpt16'), - 247: - dict( - name='lsd_kpt29', - id=247, - color=[128, 64, 0], - type='', - swap='lsd_kpt15'), - 248: - dict( - name='lsd_kpt30', - id=248, - color=[128, 64, 0], - type='', - swap='lsd_kpt14'), - 249: - dict( - name='lsd_kpt31', - id=249, - color=[128, 64, 0], - type='', - swap='lsd_kpt13'), - 250: - dict( - name='lsd_kpt32', - id=250, - color=[128, 64, 0], - type='', - swap='lsd_kpt12'), - 251: - dict( - name='lsd_kpt33', - id=251, - color=[128, 64, 0], - type='', - swap='lsd_kpt11'), - 252: - dict( - name='lsd_kpt34', - id=252, - color=[128, 64, 0], - type='', - swap='lsd_kpt10'), - 253: - dict( - name='lsd_kpt35', - id=253, - color=[128, 64, 0], - type='', - swap='lsd_kpt9'), - 254: - dict( - name='lsd_kpt36', - id=254, - color=[128, 64, 0], - type='', - swap='lsd_kpt8'), - 255: - dict( - name='lsd_kpt37', - id=255, - color=[128, 64, 0], - type='', - swap='lsd_kpt7'), - 256: - dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''), - 257: - dict( - name='vd_kpt2', - id=257, - color=[128, 64, 255], - type='', - swap='vd_kpt6'), - 258: - dict( - name='vd_kpt3', - id=258, - color=[128, 64, 255], - type='', - swap='vd_kpt5'), - 259: - dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''), - 260: - dict( - name='vd_kpt5', - id=260, - color=[128, 64, 255], - type='', - swap='vd_kpt3'), - 261: - dict( - name='vd_kpt6', - id=261, - 
color=[128, 64, 255], - type='', - swap='vd_kpt2'), - 262: - dict( - name='vd_kpt7', - id=262, - color=[128, 64, 255], - type='', - swap='vd_kpt19'), - 263: - dict( - name='vd_kpt8', - id=263, - color=[128, 64, 255], - type='', - swap='vd_kpt18'), - 264: - dict( - name='vd_kpt9', - id=264, - color=[128, 64, 255], - type='', - swap='vd_kpt17'), - 265: - dict( - name='vd_kpt10', - id=265, - color=[128, 64, 255], - type='', - swap='vd_kpt16'), - 266: - dict( - name='vd_kpt11', - id=266, - color=[128, 64, 255], - type='', - swap='vd_kpt15'), - 267: - dict( - name='vd_kpt12', - id=267, - color=[128, 64, 255], - type='', - swap='vd_kpt14'), - 268: - dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''), - 269: - dict( - name='vd_kpt14', - id=269, - color=[128, 64, 255], - type='', - swap='vd_kpt12'), - 270: - dict( - name='vd_kpt15', - id=270, - color=[128, 64, 255], - type='', - swap='vd_kpt11'), - 271: - dict( - name='vd_kpt16', - id=271, - color=[128, 64, 255], - type='', - swap='vd_kpt10'), - 272: - dict( - name='vd_kpt17', - id=272, - color=[128, 64, 255], - type='', - swap='vd_kpt9'), - 273: - dict( - name='vd_kpt18', - id=273, - color=[128, 64, 255], - type='', - swap='vd_kpt8'), - 274: - dict( - name='vd_kpt19', - id=274, - color=[128, 64, 255], - type='', - swap='vd_kpt7'), - 275: - dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''), - 276: - dict( - name='sd_kpt2', - id=276, - color=[128, 64, 0], - type='', - swap='sd_kpt6'), - 277: - dict( - name='sd_kpt3', - id=277, - color=[128, 64, 0], - type='', - swap='sd_kpt5'), - 278: - dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''), - 279: - dict( - name='sd_kpt5', - id=279, - color=[128, 64, 0], - type='', - swap='sd_kpt3'), - 280: - dict( - name='sd_kpt6', - id=280, - color=[128, 64, 0], - type='', - swap='sd_kpt2'), - 281: - dict( - name='sd_kpt7', - id=281, - color=[128, 64, 0], - type='', - swap='sd_kpt19'), - 282: - dict( - name='sd_kpt8', - id=282, - color=[128, 64, 0], - type='', - swap='sd_kpt18'), - 283: - dict( - name='sd_kpt9', - id=283, - color=[128, 64, 0], - type='', - swap='sd_kpt17'), - 284: - dict( - name='sd_kpt10', - id=284, - color=[128, 64, 0], - type='', - swap='sd_kpt16'), - 285: - dict( - name='sd_kpt11', - id=285, - color=[128, 64, 0], - type='', - swap='sd_kpt15'), - 286: - dict( - name='sd_kpt12', - id=286, - color=[128, 64, 0], - type='', - swap='sd_kpt14'), - 287: - dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''), - 288: - dict( - name='sd_kpt14', - id=288, - color=[128, 64, 0], - type='', - swap='sd_kpt12'), - 289: - dict( - name='sd_kpt15', - id=289, - color=[128, 64, 0], - type='', - swap='sd_kpt11'), - 290: - dict( - name='sd_kpt16', - id=290, - color=[128, 64, 0], - type='', - swap='sd_kpt10'), - 291: - dict( - name='sd_kpt17', - id=291, - color=[128, 64, 0], - type='', - swap='sd_kpt9'), - 292: - dict( - name='sd_kpt18', - id=292, - color=[128, 64, 0], - type='', - swap='sd_kpt8'), - 293: - dict( - name='sd_kpt19', - id=293, - color=[128, 64, 0], - type='', - swap='sd_kpt7') - }), - skeleton_info=dict({ - 0: - dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]), - 1: - dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]), - 2: - dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]), - 3: - dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]), - 4: - dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]), - 5: - dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]), - 6: - 
dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]), - 7: - dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]), - 8: - dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]), - 9: - dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]), - 10: - dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]), - 11: - dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]), - 12: - dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]), - 13: - dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]), - 14: - dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]), - 15: - dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]), - 16: - dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]), - 17: - dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]), - 18: - dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]), - 19: - dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]), - 20: - dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]), - 21: - dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]), - 22: - dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]), - 23: - dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]), - 24: - dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]), - 25: - dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]), - 26: - dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]), - 27: - dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]), - 28: - dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]), - 29: - dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]), - 30: - dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]), - 31: - dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]), - 32: - dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]), - 33: - dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]), - 34: - dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]), - 35: - dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]), - 36: - dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]), - 37: - dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]), - 38: - dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]), - 39: - dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]), - 40: - dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]), - 41: - dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]), - 42: - dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]), - 43: - dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]), - 44: - dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]), - 45: - dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]), - 46: - dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]), - 47: - dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]), - 48: - dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]), - 49: - dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]), - 50: - dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]), - 51: - dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]), - 52: - dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]), - 53: - dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 
0, 128]), - 54: - dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]), - 55: - dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]), - 56: - dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]), - 57: - dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]), - 58: - dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]), - 59: - dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]), - 60: - dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]), - 61: - dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]), - 62: - dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]), - 63: - dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]), - 64: - dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]), - 65: - dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]), - 66: - dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]), - 67: - dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]), - 68: - dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]), - 69: - dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]), - 70: - dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]), - 71: - dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]), - 72: - dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]), - 73: - dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]), - 74: - dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]), - 75: - dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]), - 76: - dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]), - 77: - dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]), - 78: - dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]), - 79: - dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]), - 80: - dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]), - 81: - dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]), - 82: - dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]), - 83: - dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]), - 84: - dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]), - 85: - dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]), - 86: - dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]), - 87: - dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]), - 88: - dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]), - 89: - dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]), - 90: - dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]), - 91: - dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]), - 92: - dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]), - 93: - dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]), - 94: - dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]), - 95: - dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]), - 96: - dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]), - 97: - dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]), - 98: - dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]), - 99: - dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]), - 100: - dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]), - 101: - dict(link=('lso_kpt14', 'lso_kpt15'), id=101, 
color=[0, 128, 255]), - 102: - dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]), - 103: - dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]), - 104: - dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]), - 105: - dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]), - 106: - dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]), - 107: - dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]), - 108: - dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]), - 109: - dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]), - 110: - dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]), - 111: - dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]), - 112: - dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]), - 113: - dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]), - 114: - dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]), - 115: - dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]), - 116: - dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]), - 117: - dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]), - 118: - dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]), - 119: - dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]), - 120: - dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]), - 121: - dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]), - 122: - dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]), - 123: - dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]), - 124: - dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]), - 125: - dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]), - 126: - dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]), - 127: - dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]), - 128: - dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]), - 129: - dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]), - 130: - dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]), - 131: - dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]), - 132: - dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]), - 133: - dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]), - 134: - dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]), - 135: - dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]), - 136: - dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]), - 137: - dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]), - 138: - dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]), - 139: - dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]), - 140: - dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]), - 141: - dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]), - 142: - dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]), - 143: - dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]), - 144: - dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]), - 145: - dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]), - 146: - dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]), - 147: - dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 
128, 128]), - 148: - dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]), - 149: - dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]), - 150: - dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]), - 151: - dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]), - 152: - dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]), - 153: - dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]), - 154: - dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]), - 155: - dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]), - 156: - dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]), - 157: - dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]), - 158: - dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]), - 159: - dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]), - 160: - dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]), - 161: - dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]), - 162: - dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]), - 163: - dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]), - 164: - dict( - link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, - 128]), - 165: - dict( - link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, - 128]), - 166: - dict( - link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, - 128]), - 167: - dict( - link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, - 128]), - 168: - dict( - link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, - 128]), - 169: - dict( - link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, - 128]), - 170: - dict( - link=('shorts_kpt9', 'shorts_kpt10'), - id=170, - color=[128, 128, 128]), - 171: - dict( - link=('shorts_kpt10', 'shorts_kpt3'), - id=171, - color=[128, 128, 128]), - 172: - dict( - link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, - 128]), - 173: - dict( - link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, - 128]), - 174: - dict( - link=('trousers_kpt1', 'trousers_kpt4'), - id=174, - color=[128, 0, 128]), - 175: - dict( - link=('trousers_kpt4', 'trousers_kpt5'), - id=175, - color=[128, 0, 128]), - 176: - dict( - link=('trousers_kpt5', 'trousers_kpt6'), - id=176, - color=[128, 0, 128]), - 177: - dict( - link=('trousers_kpt6', 'trousers_kpt7'), - id=177, - color=[128, 0, 128]), - 178: - dict( - link=('trousers_kpt7', 'trousers_kpt8'), - id=178, - color=[128, 0, 128]), - 179: - dict( - link=('trousers_kpt8', 'trousers_kpt9'), - id=179, - color=[128, 0, 128]), - 180: - dict( - link=('trousers_kpt9', 'trousers_kpt10'), - id=180, - color=[128, 0, 128]), - 181: - dict( - link=('trousers_kpt10', 'trousers_kpt11'), - id=181, - color=[128, 0, 128]), - 182: - dict( - link=('trousers_kpt11', 'trousers_kpt12'), - id=182, - color=[128, 0, 128]), - 183: - dict( - link=('trousers_kpt12', 'trousers_kpt13'), - id=183, - color=[128, 0, 128]), - 184: - dict( - link=('trousers_kpt13', 'trousers_kpt14'), - id=184, - color=[128, 0, 128]), - 185: - dict( - link=('trousers_kpt14', 'trousers_kpt3'), - id=185, - color=[128, 0, 128]), - 186: - dict( - link=('trousers_kpt3', 'trousers_kpt2'), - id=186, - color=[128, 0, 128]), - 187: - dict( - link=('trousers_kpt2', 'trousers_kpt1'), - id=187, - color=[128, 0, 128]), - 188: - dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]), - 189: - dict(link=('skirt_kpt4', 
'skirt_kpt5'), id=189, color=[64, 128, 128]), - 190: - dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]), - 191: - dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]), - 192: - dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]), - 193: - dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]), - 194: - dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]), - 195: - dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]), - 196: - dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]), - 197: - dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]), - 198: - dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]), - 199: - dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]), - 200: - dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]), - 201: - dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]), - 202: - dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]), - 203: - dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]), - 204: - dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]), - 205: - dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]), - 206: - dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]), - 207: - dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]), - 208: - dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]), - 209: - dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]), - 210: - dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]), - 211: - dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]), - 212: - dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]), - 213: - dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]), - 214: - dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]), - 215: - dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]), - 216: - dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]), - 217: - dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]), - 218: - dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]), - 219: - dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]), - 220: - dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]), - 221: - dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]), - 222: - dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]), - 223: - dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]), - 224: - dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]), - 225: - dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]), - 226: - dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]), - 227: - dict(link=('lsd_kpt2', 'lsd_kpt7'), id=228, color=[128, 64, 0]), - 228: - dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]), - 229: - dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]), - 230: - dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]), - 231: - dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]), - 232: - dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]), - 233: - dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]), - 234: - dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]), - 235: - dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, 
color=[128, 64, 0]), - 236: - dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]), - 237: - dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]), - 238: - dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]), - 239: - dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]), - 240: - dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]), - 241: - dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]), - 242: - dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]), - 243: - dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]), - 244: - dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]), - 245: - dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]), - 246: - dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]), - 247: - dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]), - 248: - dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]), - 249: - dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]), - 250: - dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]), - 251: - dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]), - 252: - dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]), - 253: - dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]), - 254: - dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]), - 255: - dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]), - 256: - dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]), - 257: - dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]), - 258: - dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]), - 259: - dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]), - 260: - dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]), - 261: - dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]), - 262: - dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]), - 263: - dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]), - 264: - dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]), - 265: - dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]), - 266: - dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]), - 267: - dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]), - 268: - dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]), - 269: - dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]), - 270: - dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]), - 271: - dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]), - 272: - dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]), - 273: - dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]), - 274: - dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]), - 275: - dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]), - 276: - dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]), - 277: - dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]), - 278: - dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]), - 279: - dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]), - 280: - dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]), - 281: - dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]), - 282: - dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 
64, 255]), - 283: - dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]), - 284: - dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]), - 285: - dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]), - 286: - dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]), - 287: - dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]), - 288: - dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]), - 289: - dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]), - 290: - dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]), - 291: - dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]), - 292: - dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]), - 293: - dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]), - 294: - dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]), - 295: - dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]), - 296: - dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]), - 297: - dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]), - 298: - dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]), - 299: - dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]), - 300: - dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]), - 301: - dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]), - 302: - dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]), - 303: - dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0]) - }), - joint_weights=[ - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 - ], - sigmas=[]) -param_scheduler = [ - dict( - type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False), - dict( - type='MultiStepLR', - begin=0, - end=150, - milestones=[100, 130], - gamma=0.1, - by_epoch=True) -] -optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) -auto_scale_lr = dict(base_batch_size=512) -dataset_type = 'DeepFashion2Dataset' -data_mode = 'topdown' -data_root = 'data/deepfashion2/' -codec = dict( - type='MSRAHeatmap', 
input_size=(192, 256), heatmap_size=(48, 64), sigma=2) -train_pipeline = [ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') -] -val_pipeline = [ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') -] -train_dataloader = dict( - batch_size=64, - num_workers=6, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='train/deepfashion2_vest_dress.json', - data_prefix=dict(img='train/image/'), - pipeline=[ - dict(type='LoadImage'), - dict(type='GetBBoxCenterScale'), - dict(type='RandomFlip', direction='horizontal'), - dict( - type='RandomBBoxTransform', - shift_prob=0, - rotate_factor=60, - scale_factor=(0.75, 1.25)), - dict(type='TopdownAffine', input_size=(192, 256)), - dict( - type='GenerateTarget', - encoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - dict(type='PackPoseInputs') - ])) -val_dataloader = dict( - batch_size=32, - num_workers=6, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='validation/deepfashion2_vest_dress.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -test_dataloader = dict( - batch_size=32, - num_workers=6, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='DeepFashion2Dataset', - data_root='data/deepfashion2/', - data_mode='topdown', - ann_file='validation/deepfashion2_vest_dress.json', - data_prefix=dict(img='validation/image/'), - test_mode=True, - pipeline=[ - dict(type='LoadImage', backend_args=dict(backend='local')), - dict(type='GetBBoxCenterScale'), - dict(type='TopdownAffine', input_size=(192, 256)), - dict(type='PackPoseInputs') - ])) -channel_cfg = dict( - num_output_channels=294, - dataset_joints=294, - dataset_channel=[[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293 - ]], - inference_channel=[ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, - 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, - 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, - 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, - 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, - 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, - 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 290, 291, 292, 293 - ]) -model = dict( - type='TopdownPoseEstimator', - data_preprocessor=dict( - type='PoseDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True), - backbone=dict( - type='ResNet', - depth=50, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - head=dict( - type='HeatmapHead', - in_channels=2048, - out_channels=294, - loss=dict(type='KeypointMSELoss', use_target_weight=True), - decoder=dict( - type='MSRAHeatmap', - input_size=(192, 256), - heatmap_size=(48, 64), - sigma=2)), - test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True)) -val_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -test_evaluator = [ - dict(type='PCKAccuracy', thr=0.2), - dict(type='AUC'), - dict(type='EPE') -] -launcher = 'pytorch' -work_dir = './work_dirs/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192' diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/auto_aug.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/auto_aug.py deleted file mode 100644 index 5a10f7eec61ea40336698118342939470f73d052..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/auto_aug.py +++ /dev/null @@ -1,96 +0,0 @@ -# Policy for ImageNet, refers to -# 
https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py -policy_imagenet = [ - [ - dict(type='Posterize', bits=4, prob=0.4), - dict(type='Rotate', angle=30., prob=0.6) - ], - [ - dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), - dict(type='AutoContrast', prob=0.6) - ], - [dict(type='Equalize', prob=0.8), - dict(type='Equalize', prob=0.6)], - [ - dict(type='Posterize', bits=5, prob=0.6), - dict(type='Posterize', bits=5, prob=0.6) - ], - [ - dict(type='Equalize', prob=0.4), - dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) - ], - [ - dict(type='Equalize', prob=0.4), - dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) - ], - [ - dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), - dict(type='Equalize', prob=0.6) - ], - [dict(type='Posterize', bits=6, prob=0.8), - dict(type='Equalize', prob=1.)], - [ - dict(type='Rotate', angle=10., prob=0.2), - dict(type='Solarize', thr=256 / 9, prob=0.6) - ], - [ - dict(type='Equalize', prob=0.6), - dict(type='Posterize', bits=5, prob=0.4) - ], - [ - dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), - dict(type='ColorTransform', magnitude=0., prob=0.4) - ], - [ - dict(type='Rotate', angle=30., prob=0.4), - dict(type='Equalize', prob=0.6) - ], - [dict(type='Equalize', prob=0.0), - dict(type='Equalize', prob=0.8)], - [dict(type='Invert', prob=0.6), - dict(type='Equalize', prob=1.)], - [ - dict(type='ColorTransform', magnitude=0.4, prob=0.6), - dict(type='Contrast', magnitude=0.8, prob=1.) - ], - [ - dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), - dict(type='ColorTransform', magnitude=0.2, prob=1.) - ], - [ - dict(type='ColorTransform', magnitude=0.8, prob=0.8), - dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) - ], - [ - dict(type='Sharpness', magnitude=0.7, prob=0.4), - dict(type='Invert', prob=0.6) - ], - [ - dict( - type='Shear', - magnitude=0.3 / 9 * 5, - prob=0.6, - direction='horizontal'), - dict(type='Equalize', prob=1.) - ], - [ - dict(type='ColorTransform', magnitude=0., prob=0.4), - dict(type='Equalize', prob=0.6) - ], - [ - dict(type='Equalize', prob=0.4), - dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) - ], - [ - dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), - dict(type='AutoContrast', prob=0.6) - ], - [dict(type='Invert', prob=0.6), - dict(type='Equalize', prob=1.)], - [ - dict(type='ColorTransform', magnitude=0.4, prob=0.6), - dict(type='Contrast', magnitude=0.8, prob=1.) 
- ], - [dict(type='Equalize', prob=0.8), - dict(type='Equalize', prob=0.6)], -] diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.js deleted file mode 100644 index b11e24cc8d29f49a76d4f4f670a8d487fccd8ddd..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.js +++ /dev/null @@ -1,103 +0,0 @@ -import Dialog from '../dialog/Dialog.js'; -import Methods from './methods/Methods.js'; -import RegisterEvents from './methods/RegisterEvents.js'; -import DeepClone from '../../../plugins/utils/object/DeepClone.js'; -import CreateBackground from '../utils/build/CreateBackground.js'; -import CreateLabel from '../utils/build/CreateLabel.js'; -import CreateContent from './methods/CreateContent.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; -import HasValue from '../../../plugins/utils/object/HasValue.js'; -import TextArea from '../textarea/TextArea.js'; - -const GetValue = Phaser.Utils.Objects.GetValue; - -class ConfirmDialog extends Dialog { - constructor(scene, config, creators) { - config = (config) ? DeepClone(config) : {}; - - if (creators === undefined) { - creators = {}; - } - - var createBackground = GetValue(creators, 'background', CreateBackground); - if (createBackground) { - config.background = createBackground(scene, config.background); - } else { - delete config.background; - } - - config.title = CreateLabel(scene, config.title, creators.title); - - config.content = CreateContent(scene, config.content, creators.content); - if (config.content instanceof TextArea) { - if (HasValue(config, 'height') && !HasValue(config, 'proportion.content')) { - SetValue(config, 'proportion.content', 1); - } - } - - var defaultButtonConfig = config.button; - var buttonAConfig = config.buttonA || defaultButtonConfig; - var buttonBConfig = config.buttonB || defaultButtonConfig; - var buttonMode = config.buttonMode; - if (buttonMode === undefined) { - buttonMode = (!!buttonAConfig && !!buttonBConfig) ? 2 : - (!!buttonAConfig) ? 1 : - 0; - } - - var defaultButtonCreator = creators.button; - var buttonACreators = creators.buttonA || defaultButtonCreator; - var buttonBCreators = creators.buttonB || defaultButtonCreator; - switch (buttonMode) { - case 2: - config.actions = [ - CreateLabel(scene, buttonAConfig, buttonACreators), - CreateLabel(scene, buttonBConfig, buttonBCreators), - ] - break; - - case 1: - config.actions = [ - CreateLabel(scene, buttonAConfig, buttonACreators), - ] - break; - - case 0: - break; - - default: - config.actions = []; - break; - } - - var defaultChoiceConfig = config.choice; - if (defaultChoiceConfig) { - config.choices = []; - } - - super(scene, config); - this.type = 'rexConfirmDialog'; - - this.buttonMode = buttonMode; - - this.defaultActionConfig = defaultButtonConfig; - this.defaultActionButtonCreator = defaultButtonCreator; - - this.defaultChoiceConfig = defaultChoiceConfig; - this.defaultChoiceCreator = creators.choice; - - var buttons = this.childrenMap.actions; - this.addChildrenMap('buttonA', (buttons) ? buttons[0] : null); - this.addChildrenMap('buttonB', (buttons) ? 
buttons[1] : null); - - // Interactive - RegisterEvents.call(this); - } -} - -Object.assign( - ConfirmDialog.prototype, - Methods -) - -export default ConfirmDialog; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/InsertEmptyColumn.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/InsertEmptyColumn.js deleted file mode 100644 index 637859743ad78148726bc62380a877fcbcfd49ce..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/InsertEmptyColumn.js +++ /dev/null @@ -1,34 +0,0 @@ -var InsertEmptyColumn = function (colIndex, proportion, space) { - if (proportion === undefined) { - proportion = this.columnProportions[0] || 0; - } - if (space === undefined) { - space = this.space.column[0] || 0; - } - - this.columnCount += 1; - this.gridCount += this.rowCount; - - for (var i = this.rowCount - 1; i >= 0; i--) { - var insertIndex = (i * this.columnCount) + colIndex; - this.sizerChildren.splice(insertIndex, 0, null); - } - - this.columnProportions.push(proportion); - - this.columnWidth.length += 1; // this.columnWidth will be recalculated when layout() - - this.space.column.splice(colIndex, 0, space); - - return this; -} - -var AddEmptyColumn = function (proportion, space) { - InsertEmptyColumn.call(this, this.columnCount, proportion, space); - return this; -} - -export { - InsertEmptyColumn, - AddEmptyColumn -}; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/MoveCallbacks.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/MoveCallbacks.js deleted file mode 100644 index 7039b23f19a73f416c96e664823291fcfef9fbaa..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/MoveCallbacks.js +++ /dev/null @@ -1,12 +0,0 @@ -var GetCallback = function (duration, ease) { - return function (child, key, sides, reset) { - if (key !== 'panel') { - sides.moveChild(child, ((reset) ? 
0 : duration), ease); - } - } -} - -export default { - show: GetCallback, - hide: GetCallback -} \ No newline at end of file diff --git a/spaces/AlexKorGKLT/webui-cpua/README.md b/spaces/AlexKorGKLT/webui-cpua/README.md deleted file mode 100644 index ca11d960cbc8aed3be295d3c44c513a539b1fbe1..0000000000000000000000000000000000000000 --- a/spaces/AlexKorGKLT/webui-cpua/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Stable Diffusion Webui on Cpu -emoji: 🏃 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -python_version: 3.10.6 -duplicated_from: DMTuit/webui-cpu ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AlexWang/lama/models/ade20k/utils.py b/spaces/AlexWang/lama/models/ade20k/utils.py deleted file mode 100644 index f337db7db54c82be041698d694e1403e8918c4c0..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/models/ade20k/utils.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" - -import os -import sys - -import numpy as np -import torch - -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) - - -def color_encode(labelmap, colors, mode='RGB'): - labelmap = labelmap.astype('int') - labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3), - dtype=np.uint8) - for label in np.unique(labelmap): - if label < 0: - continue - labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \ - np.tile(colors[label], - (labelmap.shape[0], labelmap.shape[1], 1)) - - if mode == 'BGR': - return labelmap_rgb[:, :, ::-1] - else: - return labelmap_rgb diff --git a/spaces/AlexZou/Deploy_Restoration/app.py b/spaces/AlexZou/Deploy_Restoration/app.py deleted file mode 100644 index 59a5865dd640dc9be32dc0ec370ebdd34bf20acb..0000000000000000000000000000000000000000 --- a/spaces/AlexZou/Deploy_Restoration/app.py +++ /dev/null @@ -1,69 +0,0 @@ -import gradio as gr -import os - -def inference(image, task): - if not os.path.exists('tmp'): - os.system('mkdir tmp') - image.save("tmp/lq_image.png", "PNG") - - if task == 'Dehazing': - os.system("python Dehazing.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/Haze4k.tjm") - - if task == 'LLIE': - os.system("python Lowlight.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/Lowlight.pth") - - if task == 'SuperResolutionx2': - os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx2.pth --scale 2") - - if task == 'SuperResolutionx3': - os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx3.pth --scale 3") - - if task == 'SuperResolutionx4': - os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx4.pth --scale 4") - - if task == 'Underwater': - os.system("python Underwater.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/underwater.pth") - - return 'tmp/output.png' - 
-title = "基于光学传播建模的复杂水下成像智能复原方法
    高红霞
    华南理工大学
    " -description = " 简介:项目组结合多年来关于基于光学成像建模的图像复原方法的理论研究基础,从光的物理传播建模出发,开展如下研究:建立描述复杂水体中介质的吸收、前后散射等主要成像退化特征的光学成像模型;基于光学成像模型和低秩约束实现自适应智能复原;以计算机视觉理论和光学成像模型为基础,实现消除多种退化效应的深度学习复原模型。在前期的图像复原理论研究的基础上,后续展开在雾天、低照度、低分辨率等场景下的传统与深度学习复原方法的研究。" -article = "相关成果:
    [1] Ye Cai, Hongxia Gao*, Shicheng Niu, Tian Qi. A multi-stage restoration method for degraded images with light scattering and absorption. Proceedings of the 26th International Conference on Pattern Recognition (ICPR 2022).
    [2] Ye Cai, Lan Luo, Hongxia Gao*, Shicheng Niu, Weipeng Yang, Tian Qi, Guoheng Liang. Haze Removal Using a Hybrid Convolutional Sparse Representation Model. The 14th International Conference on Digital Image Processing (ICDIP).
    [3] Hongxia Gao, Zhanhong Chen, Binyang Huang*, Jiahe Chen, Zhifu Li. Image Super Resolution Based on Conditional Generative Adversarial Network. IET Image Processing (SCI, Q3), 2020, 14(13): 3006-3013 (SCI accession no. 000595800300006).
    [4] Weipeng Yang, Hongxia Gao, Shasha Huang, Shicheng Niu, Hongsheng Chen, Guoheng Liang. Low-light image enhancement under mixed noise model with Tensor Representation. CAAI International Conference on Artificial Intelligence(CICAI)." -#description = "Gradio demo for NAFNet: Nonlinear Activation Free Network for Image Restoration. NAFNet achieves state-of-the-art performance on three tasks: image denoising, image debluring and stereo image super-resolution (SR). See the paper and project page for detailed results below. Here, we provide a demo for image denoise and deblur. To use it, simply upload your image, or click one of the examples to load them. Inference needs some time since this demo uses CPU." -#article = "

    Simple Baselines for Image Restoration | NAFSSR: Stereo Image Super-Resolution Using NAFNet | Github Repo

    " - - -examples = [['demo/underwater.jpg', 'Underwater'], - ['demo/low.jpg', 'LLIE'], - ['demo/dehaze.jpg', 'Dehazing'], - ['demo/sr.png', 'SuperResolutionx2']] - - -#examples = [['demo/low.jpg', 'LLIE']] - -''' -iface = gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input"), - gr.inputs.Radio(["LLIE"], default="LLIE", label='task'),], - gr.outputs.Image(type="file", label="Output"), - title=title, - description=description, - article=article, - enable_queue=True, - examples=examples - ) -iface.launch(debug=True,enable_queue=True) -''' -iface = gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input"), - gr.inputs.Radio(["Underwater", "LLIE", "Dehazing", "SuperResolutionx2", "SuperResolutionx3", "SuperResolutionx4"], default="Underwater", label='task'),], - gr.outputs.Image(type="file", label="Output"), - title=title, - description=description, - article=article, - enable_queue=True, - examples=examples - ) -iface.launch(debug=True,enable_queue=True) diff --git a/spaces/Amrrs/QR-code-AI-art-generator/app.py b/spaces/Amrrs/QR-code-AI-art-generator/app.py deleted file mode 100644 index 80b5d6e014678531bf636f154e642df4c9cb3631..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/QR-code-AI-art-generator/app.py +++ /dev/null @@ -1,285 +0,0 @@ -import torch -import gradio as gr -from PIL import Image -import qrcode -from pathlib import Path -from multiprocessing import cpu_count -import requests -import io -import os -from PIL import Image - -from diffusers import ( - StableDiffusionPipeline, - StableDiffusionControlNetImg2ImgPipeline, - ControlNetModel, - DDIMScheduler, - DPMSolverMultistepScheduler, - DEISMultistepScheduler, - HeunDiscreteScheduler, - EulerDiscreteScheduler, -) - -qrcode_generator = qrcode.QRCode( - version=1, - error_correction=qrcode.ERROR_CORRECT_H, - box_size=10, - border=4, -) - -controlnet = ControlNetModel.from_pretrained( - "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16 -) - -pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, -).to("cuda") -pipe.enable_xformers_memory_efficient_attention() - - -def resize_for_condition_image(input_image: Image.Image, resolution: int): - input_image = input_image.convert("RGB") - W, H = input_image.size - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(round(H / 64.0)) * 64 - W = int(round(W / 64.0)) * 64 - img = input_image.resize((W, H), resample=Image.LANCZOS) - return img - - -SAMPLER_MAP = { - "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"), - "DPM++ Karras": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True), - "Heun": lambda config: HeunDiscreteScheduler.from_config(config), - "Euler": lambda config: EulerDiscreteScheduler.from_config(config), - "DDIM": lambda config: DDIMScheduler.from_config(config), - "DEIS": lambda config: DEISMultistepScheduler.from_config(config), -} - - -def inference( - qr_code_content: str, - prompt: str, - negative_prompt: str, - guidance_scale: float = 10.0, - controlnet_conditioning_scale: float = 2.0, - strength: float = 0.8, - seed: int = -1, - init_image: Image.Image | None = None, - qrcode_image: Image.Image | None = None, - use_qr_code_as_init_image = True, - sampler = "DPM++ Karras SDE", -): - if prompt is None or prompt == "": - raise gr.Error("Prompt is required") - - 
if qrcode_image is None and qr_code_content == "": - raise gr.Error("QR Code Image or QR Code Content is required") - - pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config) - - generator = torch.manual_seed(seed) if seed != -1 else torch.Generator() - - if qr_code_content != "" or qrcode_image.size == (1, 1): - print("Generating QR Code from content") - qr = qrcode.QRCode( - version=1, - error_correction=qrcode.constants.ERROR_CORRECT_H, - box_size=10, - border=4, - ) - qr.add_data(qr_code_content) - qr.make(fit=True) - - qrcode_image = qr.make_image(fill_color="black", back_color="white") - qrcode_image = resize_for_condition_image(qrcode_image, 768) - else: - print("Using QR Code Image") - qrcode_image = resize_for_condition_image(qrcode_image, 768) - - # hack due to gradio examples - init_image = qrcode_image - - out = pipe( - prompt=prompt, - negative_prompt=negative_prompt, - image=qrcode_image, - control_image=qrcode_image, # type: ignore - width=768, # type: ignore - height=768, # type: ignore - guidance_scale=float(guidance_scale), - controlnet_conditioning_scale=float(controlnet_conditioning_scale), # type: ignore - generator=generator, - strength=float(strength), - num_inference_steps=40, - ) - return out.images[0] # type: ignore - - -with gr.Blocks() as blocks: - gr.Markdown( - """ -# QR Code AI Art Generator - -## 💡 How to generate beautiful QR codes - -We use the QR code image as the initial image **and** the control image, which allows you to generate -QR Codes that blend in **very naturally** with your provided prompt. -The strength parameter defines how much noise is added to your QR code and the noisy QR code is then guided towards both your prompt and the QR code image via Controlnet. -Use a high strength value between 0.8 and 0.95 and choose a conditioning scale between 0.6 and 2.0. -This mode arguably achieves the asthetically most appealing QR code images, but also requires more tuning of the controlnet conditioning scale and the strength value. If the generated image -looks way to much like the original QR code, make sure to gently increase the *strength* value and reduce the *conditioning* scale. Also check out the examples below. - -model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15 - - -Duplicate Space for no queue on your own hardware.

    - """ - ) - - with gr.Row(): - with gr.Column(): - qr_code_content = gr.Textbox( - label="QR Code Content", - info="QR Code Content or URL", - value="", - ) - with gr.Accordion(label="QR Code Image (Optional)", open=False): - qr_code_image = gr.Image( - label="QR Code Image (Optional). Leave blank to automatically generate QR code", - type="pil", - ) - - prompt = gr.Textbox( - label="Prompt", - info="Prompt that guides the generation towards", - ) - negative_prompt = gr.Textbox( - label="Negative Prompt", - value="ugly, disfigured, low quality, blurry, nsfw", - ) - use_qr_code_as_init_image = gr.Checkbox(label="Use QR code as init image", value=True, interactive=False, info="Whether init image should be QR code. Unclick to pass init image or generate init image with Stable Diffusion 2.1") - - with gr.Accordion(label="Init Images (Optional)", open=False, visible=False) as init_image_acc: - init_image = gr.Image(label="Init Image (Optional). Leave blank to generate image with SD 2.1", type="pil") - - - with gr.Accordion( - label="Params: The generated QR Code functionality is largely influenced by the parameters detailed below", - open=True, - ): - controlnet_conditioning_scale = gr.Slider( - minimum=0.0, - maximum=5.0, - step=0.01, - value=1.1, - label="Controlnet Conditioning Scale", - ) - strength = gr.Slider( - minimum=0.0, maximum=1.0, step=0.01, value=0.9, label="Strength" - ) - guidance_scale = gr.Slider( - minimum=0.0, - maximum=50.0, - step=0.25, - value=7.5, - label="Guidance Scale", - ) - sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="DPM++ Karras SDE") - seed = gr.Slider( - minimum=-1, - maximum=9999999999, - step=1, - value=2313123, - label="Seed", - randomize=True, - ) - with gr.Row(): - run_btn = gr.Button("Run") - with gr.Column(): - result_image = gr.Image(label="Result Image") - run_btn.click( - inference, - inputs=[ - qr_code_content, - prompt, - negative_prompt, - guidance_scale, - controlnet_conditioning_scale, - strength, - seed, - init_image, - qr_code_image, - use_qr_code_as_init_image, - sampler, - ], - outputs=[result_image], - ) - - gr.Examples( - examples=[ - [ - "https://huggingface.co/", - "A sky view of a colorful lakes and rivers flowing through the desert", - "ugly, disfigured, low quality, blurry, nsfw", - 7.5, - 1.3, - 0.9, - 5392011833, - None, - None, - True, - "DPM++ Karras SDE", - ], - [ - "https://huggingface.co/", - "Bright sunshine coming through the cracks of a wet, cave wall of big rocks", - "ugly, disfigured, low quality, blurry, nsfw", - 7.5, - 1.11, - 0.9, - 2523992465, - None, - None, - True, - "DPM++ Karras SDE", - ], - [ - "https://huggingface.co/", - "Sky view of highly aesthetic, ancient greek thermal baths in beautiful nature", - "ugly, disfigured, low quality, blurry, nsfw", - 7.5, - 1.5, - 0.9, - 2523992465, - None, - None, - True, - "DPM++ Karras SDE", - ], - ], - fn=inference, - inputs=[ - qr_code_content, - prompt, - negative_prompt, - guidance_scale, - controlnet_conditioning_scale, - strength, - seed, - init_image, - qr_code_image, - use_qr_code_as_init_image, - sampler, - ], - outputs=[result_image], - cache_examples=True, - ) - -blocks.queue(concurrency_count=1, max_size=20) -blocks.launch(share=bool(os.environ.get("SHARE", False))) diff --git a/spaces/Andres99/Tune-A-Video-Training-UI/app_inference.py b/spaces/Andres99/Tune-A-Video-Training-UI/app_inference.py deleted file mode 100644 index d705504e5bc7a8938e1b5fcfb207f4cb731c866b..0000000000000000000000000000000000000000 --- 
a/spaces/Andres99/Tune-A-Video-Training-UI/app_inference.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import enum - -import gradio as gr -from huggingface_hub import HfApi - -from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget -from inference import InferencePipeline -from utils import find_exp_dirs - - -class ModelSource(enum.Enum): - HUB_LIB = UploadTarget.MODEL_LIBRARY.value - LOCAL = 'Local' - - -class InferenceUtil: - def __init__(self, hf_token: str | None): - self.hf_token = hf_token - - def load_hub_model_list(self) -> dict: - api = HfApi(token=self.hf_token) - choices = [ - info.modelId - for info in api.list_models(author=MODEL_LIBRARY_ORG_NAME) - ] - return gr.update(choices=choices, - value=choices[0] if choices else None) - - @staticmethod - def load_local_model_list() -> dict: - choices = find_exp_dirs() - return gr.update(choices=choices, - value=choices[0] if choices else None) - - def reload_model_list(self, model_source: str) -> dict: - if model_source == ModelSource.HUB_LIB.value: - return self.load_hub_model_list() - elif model_source == ModelSource.LOCAL.value: - return self.load_local_model_list() - else: - raise ValueError - - def load_model_info(self, model_id: str) -> tuple[str, str]: - try: - card = InferencePipeline.get_model_card(model_id, self.hf_token) - except Exception: - return '', '' - base_model = getattr(card.data, 'base_model', '') - training_prompt = getattr(card.data, 'training_prompt', '') - return base_model, training_prompt - - def reload_model_list_and_update_model_info( - self, model_source: str) -> tuple[dict, str, str]: - model_list_update = self.reload_model_list(model_source) - model_list = model_list_update['choices'] - model_info = self.load_model_info(model_list[0] if model_list else '') - return model_list_update, *model_info - - -def create_inference_demo(pipe: InferencePipeline, - hf_token: str | None = None) -> gr.Blocks: - app = InferenceUtil(hf_token) - - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - with gr.Box(): - model_source = gr.Radio( - label='Model Source', - choices=[_.value for _ in ModelSource], - value=ModelSource.HUB_LIB.value) - reload_button = gr.Button('Reload Model List') - model_id = gr.Dropdown(label='Model ID', - choices=None, - value=None) - with gr.Accordion( - label= - 'Model info (Base model and prompt used for training)', - open=False): - with gr.Row(): - base_model_used_for_training = gr.Text( - label='Base model', interactive=False) - prompt_used_for_training = gr.Text( - label='Training prompt', interactive=False) - prompt = gr.Textbox( - label='Prompt', - max_lines=1, - placeholder='Example: "A panda is surfing"') - video_length = gr.Slider(label='Video length', - minimum=4, - maximum=12, - step=1, - value=8) - fps = gr.Slider(label='FPS', - minimum=1, - maximum=12, - step=1, - value=1) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - step=1, - value=0) - with gr.Accordion('Other Parameters', open=False): - num_steps = gr.Slider(label='Number of Steps', - minimum=0, - maximum=100, - step=1, - value=50) - guidance_scale = gr.Slider(label='CFG Scale', - minimum=0, - maximum=50, - step=0.1, - value=7.5) - - run_button = gr.Button('Generate') - - gr.Markdown(''' - - After training, you can press "Reload Model List" button to load your trained model names. - - It takes a few minutes to download model first. 
- - Expected time to generate an 8-frame video: 70 seconds with T4, 24 seconds with A10G, (10 seconds with A100) - ''') - with gr.Column(): - result = gr.Video(label='Result') - - model_source.change(fn=app.reload_model_list_and_update_model_info, - inputs=model_source, - outputs=[ - model_id, - base_model_used_for_training, - prompt_used_for_training, - ]) - reload_button.click(fn=app.reload_model_list_and_update_model_info, - inputs=model_source, - outputs=[ - model_id, - base_model_used_for_training, - prompt_used_for_training, - ]) - model_id.change(fn=app.load_model_info, - inputs=model_id, - outputs=[ - base_model_used_for_training, - prompt_used_for_training, - ]) - inputs = [ - model_id, - prompt, - video_length, - fps, - seed, - num_steps, - guidance_scale, - ] - prompt.submit(fn=pipe.run, inputs=inputs, outputs=result) - run_button.click(fn=pipe.run, inputs=inputs, outputs=result) - return demo - - -if __name__ == '__main__': - import os - - hf_token = os.getenv('HF_TOKEN') - pipe = InferencePipeline(hf_token) - demo = create_inference_demo(pipe, hf_token) - demo.queue(max_size=10).launch(share=False) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py deleted file mode 100644 index 1ea0bafa51a011d4c1741c46090aaae55b1e06e5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +++ /dev/null @@ -1,1058 +0,0 @@ -import html -import inspect -import re -import urllib.parse as ul -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL -import torch -import torch.nn.functional as F -from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer - -from ...loaders import LoraLoaderMixin -from ...models import UNet2DConditionModel -from ...schedulers import DDPMScheduler -from ...utils import ( - BACKENDS_MAPPING, - PIL_INTERPOLATION, - is_accelerate_available, - is_accelerate_version, - is_bs4_available, - is_ftfy_available, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline -from . 
import IFPipelineOutput -from .safety_checker import IFSafetyChecker -from .watermark import IFWatermarker - - -if is_bs4_available(): - from bs4 import BeautifulSoup - -if is_ftfy_available(): - import ftfy - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize -def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: - w, h = images.size - - coef = w / h - - w, h = img_size, img_size - - if coef >= 1: - w = int(round(img_size / 8 * coef) * 8) - else: - h = int(round(img_size / 8 / coef) * 8) - - images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) - - return images - - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline - >>> from diffusers.utils import pt_to_pil - >>> import torch - >>> from PIL import Image - >>> import requests - >>> from io import BytesIO - - >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" - >>> response = requests.get(url) - >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") - >>> original_image = original_image.resize((768, 512)) - - >>> pipe = IFImg2ImgPipeline.from_pretrained( - ... "DeepFloyd/IF-I-XL-v1.0", - ... variant="fp16", - ... torch_dtype=torch.float16, - ... ) - >>> pipe.enable_model_cpu_offload() - - >>> prompt = "A fantasy landscape in style minecraft" - >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) - - >>> image = pipe( - ... image=original_image, - ... prompt_embeds=prompt_embeds, - ... negative_prompt_embeds=negative_embeds, - ... output_type="pt", - ... ).images - - >>> # save intermediate image - >>> pil_image = pt_to_pil(image) - >>> pil_image[0].save("./if_stage_I.png") - - >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( - ... "DeepFloyd/IF-II-L-v1.0", - ... text_encoder=None, - ... variant="fp16", - ... torch_dtype=torch.float16, - ... ) - >>> super_res_1_pipe.enable_model_cpu_offload() - - >>> image = super_res_1_pipe( - ... image=image, - ... original_image=original_image, - ... prompt_embeds=prompt_embeds, - ... negative_prompt_embeds=negative_embeds, - ... 
).images - >>> image[0].save("./if_stage_II.png") - ``` -""" - - -class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin): - tokenizer: T5Tokenizer - text_encoder: T5EncoderModel - - unet: UNet2DConditionModel - scheduler: DDPMScheduler - image_noising_scheduler: DDPMScheduler - - feature_extractor: Optional[CLIPImageProcessor] - safety_checker: Optional[IFSafetyChecker] - - watermarker: Optional[IFWatermarker] - - bad_punct_regex = re.compile( - r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}" - ) # noqa - - _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"] - - def __init__( - self, - tokenizer: T5Tokenizer, - text_encoder: T5EncoderModel, - unet: UNet2DConditionModel, - scheduler: DDPMScheduler, - image_noising_scheduler: DDPMScheduler, - safety_checker: Optional[IFSafetyChecker], - feature_extractor: Optional[CLIPImageProcessor], - watermarker: Optional[IFWatermarker], - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the IF license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - if unet.config.in_channels != 6: - logger.warn( - "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." - ) - - self.register_modules( - tokenizer=tokenizer, - text_encoder=text_encoder, - unet=unet, - scheduler=scheduler, - image_noising_scheduler=image_noising_scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - watermarker=watermarker, - ) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.enable_model_cpu_offload - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
- """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - - if self.text_encoder is not None: - _, hook = cpu_offload_with_hook(self.text_encoder, device, prev_module_hook=hook) - - # Accelerate will move the next model to the device _before_ calling the offload hook of the - # previous model. This will cause both models to be present on the device at the same time. - # IF uses T5 for its text encoder which is really large. We can manually call the offload - # hook for the text encoder to ensure it's moved to the cpu before the unet is moved to - # the GPU. - self.text_encoder_offload_hook = hook - - _, hook = cpu_offload_with_hook(self.unet, device, prev_module_hook=hook) - - # if the safety checker isn't called, `unet_offload_hook` will have to be called to manually offload the unet - self.unet_offload_hook = hook - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks - def remove_all_hooks(self): - if is_accelerate_available(): - from accelerate.hooks import remove_hook_from_module - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - for model in [self.text_encoder, self.unet, self.safety_checker]: - if model is not None: - remove_hook_from_module(model, recurse=True) - - self.unet_offload_hook = None - self.text_encoder_offload_hook = None - self.final_offload_hook = None - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing - def _text_preprocessing(self, text, clean_caption=False): - if clean_caption and not is_bs4_available(): - logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") - clean_caption = False - - if clean_caption and not is_ftfy_available(): - logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) - logger.warn("Setting `clean_caption` to False...") - clean_caption = False - - if not isinstance(text, (tuple, list)): - text = [text] - - def process(text: str): - if clean_caption: - text = self._clean_caption(text) - text = self._clean_caption(text) - else: - text = text.lower().strip() - return text - - return [process(t) for t in text] - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption - def _clean_caption(self, caption): - caption = str(caption) - caption = ul.unquote_plus(caption) - caption = caption.strip().lower() - caption = re.sub("", "person", caption) - # urls: - caption = re.sub( - r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa - "", - caption, - ) # regex for urls - caption = re.sub( - r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa - "", - caption, - ) # regex for urls - # html: - caption = BeautifulSoup(caption, features="html.parser").text - - # @ - caption = 
re.sub(r"@[\w\d]+\b", "", caption) - - # 31C0—31EF CJK Strokes - # 31F0—31FF Katakana Phonetic Extensions - # 3200—32FF Enclosed CJK Letters and Months - # 3300—33FF CJK Compatibility - # 3400—4DBF CJK Unified Ideographs Extension A - # 4DC0—4DFF Yijing Hexagram Symbols - # 4E00—9FFF CJK Unified Ideographs - caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) - caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) - caption = re.sub(r"[\u3200-\u32ff]+", "", caption) - caption = re.sub(r"[\u3300-\u33ff]+", "", caption) - caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) - caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) - caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) - ####################################################### - - # все виды тире / all types of dash --> "-" - caption = re.sub( - r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa - "-", - caption, - ) - - # кавычки к одному стандарту - caption = re.sub(r"[`´«»“”¨]", '"', caption) - caption = re.sub(r"[‘’]", "'", caption) - - # " - caption = re.sub(r""?", "", caption) - # & - caption = re.sub(r"&", "", caption) - - # ip adresses: - caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) - - # article ids: - caption = re.sub(r"\d:\d\d\s+$", "", caption) - - # \n - caption = re.sub(r"\\n", " ", caption) - - # "#123" - caption = re.sub(r"#\d{1,3}\b", "", caption) - # "#12345.." - caption = re.sub(r"#\d{5,}\b", "", caption) - # "123456.." - caption = re.sub(r"\b\d{6,}\b", "", caption) - # filenames: - caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) - - # - caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" - caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" - - caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT - caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " - - # this-is-my-cute-cat / this_is_my_cute_cat - regex2 = re.compile(r"(?:\-|\_)") - if len(re.findall(regex2, caption)) > 3: - caption = re.sub(regex2, " ", caption) - - caption = ftfy.fix_text(caption) - caption = html.unescape(html.unescape(caption)) - - caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 - caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc - caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 - - caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) - caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) - caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) - caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) - caption = re.sub(r"\bpage\s+\d+\b", "", caption) - - caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... 
- - caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) - - caption = re.sub(r"\b\s+\:\s+", r": ", caption) - caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) - caption = re.sub(r"\s+", " ", caption) - - caption.strip() - - caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) - caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) - caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) - caption = re.sub(r"^\.\S+$", "", caption) - - return caption.strip() - - @torch.no_grad() - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt - def encode_prompt( - self, - prompt, - do_classifier_free_guidance=True, - num_images_per_prompt=1, - device=None, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - clean_caption: bool = False, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`, *optional*): - torch device to place the resulting embeddings on - num_images_per_prompt (`int`, *optional*, defaults to 1): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - """ - if prompt is not None and negative_prompt is not None: - if type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." 
- ) - - if device is None: - device = self._execution_device - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF - max_length = 77 - - if prompt_embeds is None: - prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=max_length, - truncation=True, - add_special_tokens=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {max_length} tokens: {removed_text}" - ) - - attention_mask = text_inputs.attention_mask.to(device) - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - if self.text_encoder is not None: - dtype = self.text_encoder.dtype - elif self.unet is not None: - dtype = self.unet.dtype - else: - dtype = None - - prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
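The fixed `max_length = 77` above means every prompt is padded or truncated to 77 tokens before reaching the T5 encoder; a standalone sketch of that tokenizer call (the `"t5-small"` id is only a stand-in for the pipeline's actual tokenizer):

```
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")  # placeholder tokenizer
inputs = tok(
    "a photo of an astronaut riding a horse " * 10,  # deliberately too long
    padding="max_length",
    max_length=77,
    truncation=True,
    add_special_tokens=True,
    return_tensors="pt",
)
print(inputs.input_ids.shape)  # torch.Size([1, 77]) -- everything past 77 tokens is dropped
```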
- ) - else: - uncond_tokens = negative_prompt - - uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_attention_mask=True, - add_special_tokens=True, - return_tensors="pt", - ) - attention_mask = uncond_input.attention_mask.to(device) - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - else: - negative_prompt_embeds = None - - return prompt_embeds, negative_prompt_embeds - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) - image, nsfw_detected, watermark_detected = self.safety_checker( - images=image, - clip_input=safety_checker_input.pixel_values.to(dtype=dtype), - ) - else: - nsfw_detected = None - watermark_detected = None - - if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: - self.unet_offload_hook.offload() - - return image, nsfw_detected, watermark_detected - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - image, - original_image, - batch_size, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - ): - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." 
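The `prepare_extra_step_kwargs` helper above relies on a small introspection trick worth seeing on its own: only forward a keyword argument if the target callable actually declares it. A self-contained sketch (toy `toy_step` function, not a real scheduler):

```
import inspect

def accepts(fn, name: str) -> bool:
    return name in set(inspect.signature(fn).parameters.keys())

def toy_step(sample, t, generator=None):  # stand-in for scheduler.step
    return sample

extra_step_kwargs = {}
if accepts(toy_step, "eta"):
    extra_step_kwargs["eta"] = 0.0          # skipped: toy_step has no `eta`
if accepts(toy_step, "generator"):
    extra_step_kwargs["generator"] = None   # kept
print(extra_step_kwargs)  # {'generator': None}
```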
- ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - # image - - if isinstance(image, list): - check_image_type = image[0] - else: - check_image_type = image - - if ( - not isinstance(check_image_type, torch.Tensor) - and not isinstance(check_image_type, PIL.Image.Image) - and not isinstance(check_image_type, np.ndarray) - ): - raise ValueError( - "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" - f" {type(check_image_type)}" - ) - - if isinstance(image, list): - image_batch_size = len(image) - elif isinstance(image, torch.Tensor): - image_batch_size = image.shape[0] - elif isinstance(image, PIL.Image.Image): - image_batch_size = 1 - elif isinstance(image, np.ndarray): - image_batch_size = image.shape[0] - else: - assert False - - if batch_size != image_batch_size: - raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") - - # original_image - - if isinstance(original_image, list): - check_image_type = original_image[0] - else: - check_image_type = original_image - - if ( - not isinstance(check_image_type, torch.Tensor) - and not isinstance(check_image_type, PIL.Image.Image) - and not isinstance(check_image_type, np.ndarray) - ): - raise ValueError( - "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" - f" {type(check_image_type)}" - ) - - if isinstance(original_image, list): - image_batch_size = len(original_image) - elif isinstance(original_image, torch.Tensor): - image_batch_size = original_image.shape[0] - elif isinstance(original_image, PIL.Image.Image): - image_batch_size = 1 - elif isinstance(original_image, np.ndarray): - image_batch_size = original_image.shape[0] - else: - assert False - - if batch_size != image_batch_size: - raise ValueError( - f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" - ) - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image - def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: - if not isinstance(image, list): - image = [image] - - def numpy_to_pt(images): - if images.ndim == 3: - images = images[..., None] - - images = torch.from_numpy(images.transpose(0, 3, 1, 2)) - return images - - if isinstance(image[0], PIL.Image.Image): - new_image = [] - - for image_ in image: - image_ = image_.convert("RGB") - image_ = resize(image_, self.unet.sample_size) - image_ = np.array(image_) - image_ = image_.astype(np.float32) - image_ = image_ / 127.5 - 1 - new_image.append(image_) - - image = new_image - - image = np.stack(image, axis=0) # to np - image = numpy_to_pt(image) # to pt - - elif isinstance(image[0], np.ndarray): - image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) - image = numpy_to_pt(image) - - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) - - return image - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image - def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: - if not isinstance(image, torch.Tensor) and not isinstance(image, list): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] - - image = np.stack(image, axis=0) # to np - image = torch.from_numpy(image.transpose(0, 3, 1, 2)) - elif isinstance(image[0], np.ndarray): - image = np.stack(image, axis=0) # to np - if image.ndim == 5: - image = image[0] - - image = torch.from_numpy(image.transpose(0, 3, 1, 2)) - elif isinstance(image, list) and isinstance(image[0], torch.Tensor): - dims = image[0].ndim - - if dims == 3: - image = torch.stack(image, dim=0) - elif dims == 4: - image = torch.concat(image, dim=0) - else: - raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") - - image = image.to(device=device, dtype=self.unet.dtype) - - image = image.repeat_interleave(num_images_per_prompt, dim=0) - - return image - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps - def get_timesteps(self, num_inference_steps, strength): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images - def prepare_intermediate_images( - self, image, timestep, batch_size, num_images_per_prompt, dtype, device, 
generator=None - ): - _, channels, height, width = image.shape - - batch_size = batch_size * num_images_per_prompt - - shape = (batch_size, channels, height, width) - - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - - image = image.repeat_interleave(num_images_per_prompt, dim=0) - image = self.scheduler.add_noise(image, noise, timestep) - - return image - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor], - original_image: Union[ - PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] - ] = None, - strength: float = 0.8, - prompt: Union[str, List[str]] = None, - num_inference_steps: int = 50, - timesteps: List[int] = None, - guidance_scale: float = 4.0, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - noise_level: int = 250, - clean_caption: bool = True, - ): - """ - Function invoked when calling the pipeline for generation. - - Args: - image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - original_image (`torch.FloatTensor` or `PIL.Image.Image`): - The original image that `image` was varied from. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - timesteps (`List[int]`, *optional*): - Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` - timesteps are used. Must be in descending order. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under - `self.processor` in - [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - noise_level (`int`, *optional*, defaults to 250): - The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` - clean_caption (`bool`, *optional*, defaults to `True`): - Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to - be installed. If the dependencies are not installed, the embeddings will be created from the raw - prompt. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When - returning a tuple, the first element is a list with the generated images, and the second element is a list - of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) - or watermarked content, according to the `safety_checker`. - """ - # 1. Check inputs. 
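Before the implementation starts, the interaction between `strength` and `num_inference_steps` described above can be checked with the arithmetic from `get_timesteps` (a standalone sketch of that calculation, not pipeline code):

```
num_inference_steps = 50
strength = 0.8

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10

# The scheduler's 50 timesteps get sliced as timesteps[t_start:], so only 40
# denoising steps actually run; strength=1.0 would keep all 50 and effectively
# ignore the input `image`.
print(t_start, num_inference_steps - t_start)  # 10 40
```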
Raise error if not correct - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - self.check_inputs( - prompt, - image, - original_image, - batch_size, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - ) - - # 2. Define call parameters - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - device = self._execution_device - - # 3. Encode input prompt - prompt_embeds, negative_prompt_embeds = self.encode_prompt( - prompt, - do_classifier_free_guidance, - num_images_per_prompt=num_images_per_prompt, - device=device, - negative_prompt=negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - clean_caption=clean_caption, - ) - - if do_classifier_free_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - dtype = prompt_embeds.dtype - - # 4. Prepare timesteps - if timesteps is not None: - self.scheduler.set_timesteps(timesteps=timesteps, device=device) - timesteps = self.scheduler.timesteps - num_inference_steps = len(timesteps) - else: - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) - - # 5. prepare original image - original_image = self.preprocess_original_image(original_image) - original_image = original_image.to(device=device, dtype=dtype) - - # 6. Prepare intermediate images - noise_timestep = timesteps[0:1] - noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) - - intermediate_images = self.prepare_intermediate_images( - original_image, - noise_timestep, - batch_size, - num_images_per_prompt, - dtype, - device, - generator, - ) - - # 7. Prepare upscaled image and noise level - _, _, height, width = original_image.shape - - image = self.preprocess_image(image, num_images_per_prompt, device) - - upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) - - noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) - noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) - upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) - - if do_classifier_free_guidance: - noise_level = torch.cat([noise_level] * 2) - - # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # HACK: see comment in `enable_model_cpu_offload` - if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: - self.text_encoder_offload_hook.offload() - - # 9. 
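The guidance weight described in the comment above ("`guidance_scale = 1` corresponds to doing no classifier free guidance") boils down to a single blend of the two noise predictions made per step; a toy sketch with dummy tensors:

```
import torch

guidance_scale = 4.0
noise_pred_uncond = torch.zeros(2, 3, 8, 8)  # toy stand-ins for the two halves
noise_pred_text = torch.ones(2, 3, 8, 8)     # of the chunked model output

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# With guidance_scale = 1.0 this collapses to noise_pred_text, i.e. the purely
# text-conditioned prediction and no extra guidance push.
print(noise_pred.mean().item())  # 4.0
```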
Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - model_input = torch.cat([intermediate_images, upscaled], dim=1) - - model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input - model_input = self.scheduler.scale_model_input(model_input, t) - - # predict the noise residual - noise_pred = self.unet( - model_input, - t, - encoder_hidden_states=prompt_embeds, - class_labels=noise_level, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) - noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) - - if self.scheduler.config.variance_type not in ["learned", "learned_range"]: - noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) - - # compute the previous noisy sample x_t -> x_t-1 - intermediate_images = self.scheduler.step( - noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False - )[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, intermediate_images) - - image = intermediate_images - - if output_type == "pil": - # 10. Post-processing - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - # 11. Run safety checker - image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # 12. Convert to PIL - image = self.numpy_to_pil(image) - - # 13. Apply watermark - if self.watermarker is not None: - self.watermarker.apply_watermark(image, self.unet.config.sample_size) - elif output_type == "pt": - nsfw_detected = None - watermark_detected = None - - if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: - self.unet_offload_hook.offload() - else: - # 10. Post-processing - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - # 11. 
Run safety checker - image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, nsfw_detected, watermark_detected) - - return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py deleted file mode 100644 index 907bede158c7043d2a3b0d9daf64a0b6a13bc83c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py deleted file mode 100644 index b8be60145758c191543ef0683234e63f02d8fe60..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = './gfl_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -# multi-scale training -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/backbone.py b/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/backbone.py deleted file mode 100644 index a30942eca9cad56e75252c3026dca95bf1021df7..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/backbone.py +++ /dev/null @@ -1,117 +0,0 @@ -import torch.nn as nn - -from .trident_conv import MultiScaleTridentConv - - -class ResidualBlock(nn.Module): - def __init__(self, in_planes, planes, norm_layer=nn.InstanceNorm2d, stride=1, dilation=1, - ): - super(ResidualBlock, self).__init__() - - self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, - dilation=dilation, padding=dilation, stride=stride, bias=False) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, - dilation=dilation, padding=dilation, bias=False) - self.relu = nn.ReLU(inplace=True) - - self.norm1 = norm_layer(planes) - self.norm2 = norm_layer(planes) - if not stride == 1 or in_planes != planes: - self.norm3 = norm_layer(planes) - - if stride == 1 and in_planes == planes: - self.downsample = None - else: - self.downsample = nn.Sequential( - nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) - - def forward(self, x): - y = x - y = self.relu(self.norm1(self.conv1(y))) - y = self.relu(self.norm2(self.conv2(y))) - - if self.downsample is not None: 
- x = self.downsample(x) - - return self.relu(x + y) - - -class CNNEncoder(nn.Module): - def __init__(self, output_dim=128, - norm_layer=nn.InstanceNorm2d, - num_output_scales=1, - **kwargs, - ): - super(CNNEncoder, self).__init__() - self.num_branch = num_output_scales - - feature_dims = [64, 96, 128] - - self.conv1 = nn.Conv2d(3, feature_dims[0], kernel_size=7, stride=2, padding=3, bias=False) # 1/2 - self.norm1 = norm_layer(feature_dims[0]) - self.relu1 = nn.ReLU(inplace=True) - - self.in_planes = feature_dims[0] - self.layer1 = self._make_layer(feature_dims[0], stride=1, norm_layer=norm_layer) # 1/2 - self.layer2 = self._make_layer(feature_dims[1], stride=2, norm_layer=norm_layer) # 1/4 - - # highest resolution 1/4 or 1/8 - stride = 2 if num_output_scales == 1 else 1 - self.layer3 = self._make_layer(feature_dims[2], stride=stride, - norm_layer=norm_layer, - ) # 1/4 or 1/8 - - self.conv2 = nn.Conv2d(feature_dims[2], output_dim, 1, 1, 0) - - if self.num_branch > 1: - if self.num_branch == 4: - strides = (1, 2, 4, 8) - elif self.num_branch == 3: - strides = (1, 2, 4) - elif self.num_branch == 2: - strides = (1, 2) - else: - raise ValueError - - self.trident_conv = MultiScaleTridentConv(output_dim, output_dim, - kernel_size=3, - strides=strides, - paddings=1, - num_branch=self.num_branch, - ) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): - if m.weight is not None: - nn.init.constant_(m.weight, 1) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def _make_layer(self, dim, stride=1, dilation=1, norm_layer=nn.InstanceNorm2d): - layer1 = ResidualBlock(self.in_planes, dim, norm_layer=norm_layer, stride=stride, dilation=dilation) - layer2 = ResidualBlock(dim, dim, norm_layer=norm_layer, stride=1, dilation=dilation) - - layers = (layer1, layer2) - - self.in_planes = dim - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.norm1(x) - x = self.relu1(x) - - x = self.layer1(x) # 1/2 - x = self.layer2(x) # 1/4 - x = self.layer3(x) # 1/8 or 1/4 - - x = self.conv2(x) - - if self.num_branch > 1: - out = self.trident_conv([x] * self.num_branch) # high to low res - else: - out = [x] - - return out diff --git a/spaces/Armandoliv/document_parser/app.py b/spaces/Armandoliv/document_parser/app.py deleted file mode 100644 index 58d6a0006da52d75a12559aa3631d222407c5198..0000000000000000000000000000000000000000 --- a/spaces/Armandoliv/document_parser/app.py +++ /dev/null @@ -1,202 +0,0 @@ -import os -import os -os.system('pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.5#egg=detectron2"') - -import io -import pandas as pd -import numpy as np -import gradio as gr - -## for plotting -import matplotlib.pyplot as plt - -## for ocr -import pdf2image -import cv2 -import layoutparser as lp - -from docx import Document -from docx.shared import Inches - - -def parse_doc(dic): - for k,v in dic.items(): - if "Title" in k: - print('\x1b[1;31m'+ v +'\x1b[0m') - elif "Figure" in k: - plt.figure(figsize=(10,5)) - plt.imshow(v) - plt.show() - else: - print(v) - print(" ") - - -def to_image(filename): - doc = pdf2image.convert_from_path(filename, dpi=350, last_page=1) - # Save imgs - folder = "doc" - if folder not in os.listdir(): - os.makedirs(folder) - - p = 1 - for page in doc: - image_name = "page_"+str(p)+".jpg" - page.save(os.path.join(folder, image_name), "JPEG") - p = p+1 - - return 
doc - - - -def detect(doc): - # General - model = lp.Detectron2LayoutModel("lp://PubLayNet/mask_rcnn_X_101_32x8d_FPN_3x/config", - extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8], - label_map={0:"Text", 1:"Title", 2:"List", 3:"Table", 4:"Figure"}) - ## turn img into array - img = np.asarray(doc[0]) - - ## predict - detected = model.detect(img) - - - return img, detected - - -# sort detected -def split_page(img, n, axis): - new_detected, start = [], 0 - for s in range(n): - end = len(img[0])/3 * s if axis == "x" else len(img[1])/3 - section = lp.Interval(start=start, end=end, axis=axis).put_on_canvas(img) - filter_detected = detected.filter_by(section, center=True)._blocks - new_detected = new_detected + filter_detected - start = end - return lp.Layout([block.set(id=idx) for idx,block in enumerate(new_detected)]) - - - -def get_detected(img, detected): - n_cols,n_rows = 1,1 - - ## if single page just sort based on y - if (n_cols == 1) and (n_rows == 1): - new_detected = detected.sort(key=lambda x: x.coordinates[1]) - detected = lp.Layout([block.set(id=idx) for idx,block in enumerate(new_detected)]) - - ## if multi columns sort by x,y - elif (n_cols > 1) and (n_rows == 1): - detected = split_page(img, n_cols, axis="x") - - ## if multi rows sort by y,x - elif (n_cols > 1) and (n_rows == 1): - detected = split_page(img, n_rows, axis="y") - - ## if multi columns-rows - else: - pass - - return detected - - -def predict_elements(img, detected)->dict: - model = lp.TesseractAgent(languages='eng') - dic_predicted = {} - - for block in [block for block in detected if block.type in ["Title","Text", "List"]]: - ## segmentation - segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img) - ## extraction - extracted = model.detect(segmented) - ## save - dic_predicted[str(block.id)+"-"+block.type] = extracted.replace('\n',' ').strip() - - for block in [block for block in detected if block.type == "Figure"]: - ## segmentation - segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img) - ## save - dic_predicted[str(block.id)+"-"+block.type] = segmented - - - for block in [block for block in detected if block.type == "Table"]: - ## segmentation - segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img) - ## extraction - extracted = model.detect(segmented) - ## save - dic_predicted[str(block.id)+"-"+block.type] = pd.read_csv( io.StringIO(extracted) ) - - - return dic_predicted - -def gen_doc(dic_predicted:dict): - document = Document() - - for k,v in dic_predicted.items(): - - if "Figure" in k: - cv2.imwrite(f'{k}.jpg', dic_predicted[k]) - document.add_picture(f'{k}.jpg', width=Inches(3)) - - elif "Table" in k: - table = document.add_table(rows=v.shape[0], cols=v.shape[1]) - hdr_cells = table.rows[0].cells - for idx, col in enumerate(v.columns): - hdr_cells[idx].text = col - for c in v.iterrows(): - - for idx, col in enumerate(v.columns): - try: - if len(c[1][col].strip())>0: - row_cells = table.add_row().cells - row_cells[idx].text = str(c[1][col]) - except: - continue - - else: - document.add_paragraph(str(v)) - - document.save('demo.docx') - - -def main_convert(filename): - print(filename.name) - doc = to_image(filename.name) - - img, detected = detect(doc) - - n_detected = get_detected(img, detected) - - dic_predicted = predict_elements(img, n_detected) - - gen_doc(dic_predicted) - - im_out = lp.draw_box(img, detected, box_width=5, box_alpha=0.2, show_element_type=True) - dict_out = {} - for k,v in dic_predicted.items(): - if "figure" not in 
k.lower(): - dict_out[k] = dic_predicted[k] - - return 'demo.docx', im_out, dict_out - - -inputs = [gr.File(type='file', label="Original PDF File")] -outputs = [gr.File(label="Converted DOC File"),gr.Image(type="PIL.Image", label="Detected Image"), gr.JSON()] - -title = "A Document AI parser" -description = "This demo uses AI Models to detect text, titles, tables, figures and lists as well as table cells from an Scanned document.\nBased on the layout it determines reading order and generates an MS-DOC file to Download." - - -io = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description, - css= """.gr-button-primary { background: -webkit-linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764; - background: linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; - background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important; - background: -webkit-linear-gradient( - 90deg, #355764 0%, #55a8a1 100% ) !important; - color:white !important}""" - ) - -io.launch() \ No newline at end of file diff --git a/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio_dataset.py b/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio_dataset.py deleted file mode 100644 index cf21422ea0059cb2d6553f93e608b8f9fa0d3a50..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio_dataset.py +++ /dev/null @@ -1,525 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import copy -from concurrent.futures import ThreadPoolExecutor, Future -from dataclasses import dataclass, fields -from contextlib import ExitStack -import gzip -import json -import logging -import os -from pathlib import Path -import random -import sys -import typing as tp - -import torch -import torch.nn.functional as F - -from .audio import audio_read, audio_info -from .audio_utils import convert_audio -from .zip import PathInZip - -try: - import dora -except ImportError: - dora = None # type: ignore - - -@dataclass(order=True) -class BaseInfo: - - @classmethod - def _dict2fields(cls, dictionary: dict): - return { - field.name: dictionary[field.name] - for field in fields(cls) if field.name in dictionary - } - - @classmethod - def from_dict(cls, dictionary: dict): - _dictionary = cls._dict2fields(dictionary) - return cls(**_dictionary) - - def to_dict(self): - return { - field.name: self.__getattribute__(field.name) - for field in fields(self) - } - - -@dataclass(order=True) -class AudioMeta(BaseInfo): - path: str - duration: float - sample_rate: int - amplitude: tp.Optional[float] = None - weight: tp.Optional[float] = None - # info_path is used to load additional information about the audio file that is stored in zip files. 
- info_path: tp.Optional[PathInZip] = None - - @classmethod - def from_dict(cls, dictionary: dict): - base = cls._dict2fields(dictionary) - if 'info_path' in base and base['info_path'] is not None: - base['info_path'] = PathInZip(base['info_path']) - return cls(**base) - - def to_dict(self): - d = super().to_dict() - if d['info_path'] is not None: - d['info_path'] = str(d['info_path']) - return d - - -@dataclass(order=True) -class SegmentInfo(BaseInfo): - meta: AudioMeta - seek_time: float - n_frames: int # actual number of frames without padding - total_frames: int # total number of frames, padding included - sample_rate: int # actual sample rate - - -DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a'] - -logger = logging.getLogger(__name__) - - -def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta: - """AudioMeta from a path to an audio file. - - Args: - file_path (str): Resolved path of valid audio file. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - Returns: - AudioMeta: Audio file path and its metadata. - """ - info = audio_info(file_path) - amplitude: tp.Optional[float] = None - if not minimal: - wav, sr = audio_read(file_path) - amplitude = wav.abs().max().item() - return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) - - -def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta: - """If Dora is available as a dependency, try to resolve potential relative paths - in list of AudioMeta. This method is expected to be used when loading meta from file. - - Args: - m (AudioMeta): Audio meta to resolve. - fast (bool): If True, uses a really fast check for determining if a file is already absolute or not. - Only valid on Linux/Mac. - Returns: - AudioMeta: Audio meta with resolved path. - """ - def is_abs(m): - if fast: - return str(m)[0] == '/' - else: - os.path.isabs(str(m)) - - if not dora: - return m - - if not is_abs(m.path): - m.path = dora.git_save.to_absolute_path(m.path) - if m.info_path is not None and not is_abs(m.info_path.zip_path): - m.info_path.zip_path = dora.git_save.to_absolute_path(m.path) - return m - - -def find_audio_files(path: tp.Union[Path, str], - exts: tp.List[str] = DEFAULT_EXTS, - resolve: bool = True, - minimal: bool = True, - progress: bool = False, - workers: int = 0) -> tp.List[AudioMeta]: - """Build a list of AudioMeta from a given path, - collecting relevant audio files and fetching meta info. - - Args: - path (str or Path): Path to folder containing audio files. - exts (list of str): List of file extensions to consider for audio files. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - progress (bool): Whether to log progress on audio files collection. - workers (int): number of parallel workers, if 0, use only the current thread. - Returns: - List[AudioMeta]: List of audio file path and its metadata. 
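An illustrative call (the folder path, extensions and worker count are placeholders, not values from this project):

```
meta = find_audio_files(
    "/data/my_audio",          # hypothetical folder of audio files
    exts=[".wav", ".flac"],
    minimal=True,              # skip the expensive amplitude pass
    progress=True,
    workers=4,
)
print(len(meta), meta[0].duration if meta else None)
```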
- """ - audio_files = [] - futures: tp.List[Future] = [] - pool: tp.Optional[ThreadPoolExecutor] = None - with ExitStack() as stack: - if workers > 0: - pool = ThreadPoolExecutor(workers) - stack.enter_context(pool) - - if progress: - print("Finding audio files...") - for root, folders, files in os.walk(path, followlinks=True): - for file in files: - full_path = Path(root) / file - if full_path.suffix.lower() in exts: - audio_files.append(full_path) - if pool is not None: - futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal)) - if progress: - print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr) - - if progress: - print("Getting audio metadata...") - meta: tp.List[AudioMeta] = [] - for idx, file_path in enumerate(audio_files): - try: - if pool is None: - m = _get_audio_meta(str(file_path), minimal) - else: - m = futures[idx].result() - if resolve: - m = _resolve_audio_meta(m) - except Exception as err: - print("Error with", str(file_path), err, file=sys.stderr) - continue - meta.append(m) - if progress: - print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr) - meta.sort() - return meta - - -def load_audio_meta(path: tp.Union[str, Path], - resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]: - """Load list of AudioMeta from an optionally compressed json file. - - Args: - path (str or Path): Path to JSON file. - resolve (bool): Whether to resolve the path from AudioMeta (default=True). - fast (bool): activates some tricks to make things faster. - Returns: - List[AudioMeta]: List of audio file path and its total duration. - """ - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'rb') as fp: # type: ignore - lines = fp.readlines() - meta = [] - for line in lines: - d = json.loads(line) - m = AudioMeta.from_dict(d) - if resolve: - m = _resolve_audio_meta(m, fast=fast) - meta.append(m) - return meta - - -def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]): - """Save the audio metadata to the file pointer as json. - - Args: - path (str or Path): Path to JSON file. - metadata (list of BaseAudioMeta): List of audio meta to save. - """ - Path(path).parent.mkdir(exist_ok=True, parents=True) - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'wb') as fp: # type: ignore - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - json_bytes = json_str.encode('utf-8') - fp.write(json_bytes) - - -class AudioDataset: - """Base audio dataset. - - The dataset takes a list of AudioMeta and create a dataset composed of segments of audio - and potentially additional information, by creating random segments from the list of audio - files referenced in the metadata and applying minimal data pre-processing such as resampling, - mixing of channels, padding, etc. - - If no segment_duration value is provided, the AudioDataset will return the full wav for each - audio file. Otherwise, it will randomly sample audio files and create a segment of the specified - duration, applying padding if required. - - By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True - allows to return a tuple containing the torch Tensor and additional metadata on the segment and the - original audio meta. - - Args: - meta (tp.List[AudioMeta]): List of audio files metadata. - segment_duration (float): Optional segment duration of audio to load. - If not specified, the dataset will load the full audio segment from the file. 
- shuffle (bool): Set to `True` to have the data reshuffled at every epoch. - sample_rate (int): Target sample rate of the loaded audio samples. - channels (int): Target number of channels of the loaded audio samples. - sample_on_duration (bool): Set to `True` to sample segments with probability - dependent on audio file duration. This is only used if `segment_duration` is provided. - sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of - `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product - of the file duration and file weight. This is only used if `segment_duration` is provided. - min_segment_ratio (float): Minimum segment ratio to use when the audio file - is shorter than the desired segment. - max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset. - return_info (bool): Whether to return the wav only or return wav along with segment info and metadata. - min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided - audio shorter than this will be filtered out. - max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided - audio longer than this will be filtered out. - """ - def __init__(self, - meta: tp.List[AudioMeta], - segment_duration: tp.Optional[float] = None, - shuffle: bool = True, - num_samples: int = 10_000, - sample_rate: int = 48_000, - channels: int = 2, - pad: bool = True, - sample_on_duration: bool = True, - sample_on_weight: bool = True, - min_segment_ratio: float = 0.5, - max_read_retry: int = 10, - return_info: bool = False, - min_audio_duration: tp.Optional[float] = None, - max_audio_duration: tp.Optional[float] = None - ): - assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.' - assert segment_duration is None or segment_duration > 0 - assert segment_duration is None or min_segment_ratio >= 0 - logging.debug(f'sample_on_duration: {sample_on_duration}') - logging.debug(f'sample_on_weight: {sample_on_weight}') - logging.debug(f'pad: {pad}') - logging.debug(f'min_segment_ratio: {min_segment_ratio}') - - self.segment_duration = segment_duration - self.min_segment_ratio = min_segment_ratio - self.max_audio_duration = max_audio_duration - self.min_audio_duration = min_audio_duration - if self.min_audio_duration is not None and self.max_audio_duration is not None: - assert self.min_audio_duration <= self.max_audio_duration - self.meta: tp.List[AudioMeta] = self._filter_duration(meta) - assert len(self.meta) # Fail fast if all data has been filtered. - self.total_duration = sum(d.duration for d in self.meta) - - if segment_duration is None: - num_samples = len(self.meta) - self.num_samples = num_samples - self.shuffle = shuffle - self.sample_rate = sample_rate - self.channels = channels - self.pad = pad - self.sample_on_weight = sample_on_weight - self.sample_on_duration = sample_on_duration - self.sampling_probabilities = self._get_sampling_probabilities() - self.max_read_retry = max_read_retry - self.return_info = return_info - - def __len__(self): - return self.num_samples - - def _get_sampling_probabilities(self, normalized: bool = True): - """Return the sampling probabilities for each file inside `self.meta`. - """ - scores: tp.List[float] = [] - for file_meta in self.meta: - score = 1. 
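The scoring loop below multiplies an optional per-file weight by the file duration and normalizes the result into sampling probabilities; a standalone numeric sketch of what that produces (toy values, not from any real manifest):

```
import torch

durations = torch.tensor([10.0, 30.0, 60.0])  # seconds per file
weights = torch.tensor([1.0, 1.0, 2.0])       # optional AudioMeta.weight values

scores = durations * weights                  # sample_on_duration and sample_on_weight both on
probabilities = scores / scores.sum()         # tensor([0.0625, 0.1875, 0.7500])

file_index = int(torch.multinomial(probabilities, 1).item())  # long/heavy files win more often
```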
- if self.sample_on_weight and file_meta.weight is not None: - score *= file_meta.weight - if self.sample_on_duration: - score *= file_meta.duration - scores.append(score) - probabilities = torch.tensor(scores) - if normalized: - probabilities /= probabilities.sum() - return probabilities - - def sample_file(self, rng: torch.Generator) -> AudioMeta: - """Sample a given file from `self.meta`. Can be overriden in subclasses. - This is only called if `segment_duration` is not None. - - You must use the provided random number generator `rng` for reproducibility. - """ - if not self.sample_on_weight and not self.sample_on_duration: - file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item()) - else: - file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item()) - - return self.meta[file_index] - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]: - if self.segment_duration is None: - file_meta = self.meta[index] - out, sr = audio_read(file_meta.path) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames, - sample_rate=self.sample_rate) - else: - rng = torch.Generator() - if self.shuffle: - # We use index, plus extra randomness - rng.manual_seed(index + self.num_samples * random.randint(0, 2**24)) - else: - # We only use index - rng.manual_seed(index) - - for retry in range(self.max_read_retry): - file_meta = self.sample_file(rng) - # We add some variance in the file position even if audio file is smaller than segment - # without ending up with empty segments - max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio) - seek_time = torch.rand(1, generator=rng).item() * max_seek - try: - out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - target_frames = int(self.segment_duration * self.sample_rate) - if self.pad: - out = F.pad(out, (0, target_frames - n_frames)) - segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames, - sample_rate=self.sample_rate) - except Exception as exc: - logger.warning("Error opening file %s: %r", file_meta.path, exc) - if retry == self.max_read_retry - 1: - raise - else: - break - - if self.return_info: - # Returns the wav and additional information on the wave segment - return out, segment_info - else: - return out - - def collater(self, samples): - """The collater function has to be provided to the dataloader - if AudioDataset has return_info=True in order to properly collate - the samples of a batch. - """ - if self.segment_duration is None and len(samples) > 1: - assert self.pad, "Must allow padding when batching examples of different durations." - - # In this case the audio reaching the collater is of variable length as segment_duration=None. 
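What the padding branch below does can be reproduced in a few lines (standalone sketch with toy waveforms):

```
import torch
import torch.nn.functional as F

wavs = [torch.randn(1, 12_000), torch.randn(1, 9_500)]  # variable-length mono clips
max_len = max(wav.shape[-1] for wav in wavs)

batch = torch.stack([F.pad(wav, (0, max_len - wav.shape[-1])) for wav in wavs])
print(batch.shape)  # torch.Size([2, 1, 12000]) -- the shorter clip is right-padded with zeros
```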
- to_pad = self.segment_duration is None and self.pad - if to_pad: - max_len = max([wav.shape[-1] for wav, _ in samples]) - - def _pad_wav(wav): - return F.pad(wav, (0, max_len - wav.shape[-1])) - - if self.return_info: - if len(samples) > 0: - assert len(samples[0]) == 2 - assert isinstance(samples[0][0], torch.Tensor) - assert isinstance(samples[0][1], SegmentInfo) - - wavs = [wav for wav, _ in samples] - segment_infos = [copy.deepcopy(info) for _, info in samples] - - if to_pad: - # Each wav could be of a different duration as they are not segmented. - for i in range(len(samples)): - # Determines the total legth of the signal with padding, so we update here as we pad. - segment_infos[i].total_frames = max_len - wavs[i] = _pad_wav(wavs[i]) - - wav = torch.stack(wavs) - return wav, segment_infos - else: - assert isinstance(samples[0], torch.Tensor) - if to_pad: - samples = [_pad_wav(s) for s in samples] - return torch.stack(samples) - - def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Filters out audio files with short durations. - Removes from meta files that have durations that will not allow to samples examples from them. - """ - orig_len = len(meta) - - # Filter data that is too short. - if self.min_audio_duration is not None: - meta = [m for m in meta if m.duration >= self.min_audio_duration] - - # Filter data that is too long. - if self.max_audio_duration is not None: - meta = [m for m in meta if m.duration <= self.max_audio_duration] - - filtered_len = len(meta) - removed_percentage = 100*(1-float(filtered_len)/orig_len) - msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage - if removed_percentage < 10: - logging.debug(msg) - else: - logging.warning(msg) - return meta - - @classmethod - def from_meta(cls, root: tp.Union[str, Path], **kwargs): - """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file. - - Args: - root (str or Path): Path to root folder containing audio files. - kwargs: Additional keyword arguments for the AudioDataset. - """ - root = Path(root) - if root.is_dir(): - if (root / 'data.jsonl').exists(): - root = root / 'data.jsonl' - elif (root / 'data.jsonl.gz').exists(): - root = root / 'data.jsonl.gz' - else: - raise ValueError("Don't know where to read metadata from in the dir. " - "Expecting either a data.jsonl or data.jsonl.gz file but none found.") - meta = load_audio_meta(root) - return cls(meta, **kwargs) - - @classmethod - def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True, - exts: tp.List[str] = DEFAULT_EXTS, **kwargs): - """Instantiate AudioDataset from a path containing (possibly nested) audio files. - - Args: - root (str or Path): Path to root folder containing audio files. - minimal_meta (bool): Whether to only load minimal metadata or not. - exts (list of str): Extensions for audio files. - kwargs: Additional keyword arguments for the AudioDataset. 
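An end-to-end usage sketch for this constructor together with the collater above (the folder path and every keyword value here are illustrative placeholders):

```
from torch.utils.data import DataLoader

dataset = AudioDataset.from_path(
    "/data/my_audio",            # hypothetical folder of audio files
    segment_duration=5.0,
    sample_rate=32_000,
    channels=1,
    return_info=True,
)
loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collater)
wav, infos = next(iter(loader))  # wav: [4, 1, 160000], infos: list of SegmentInfo
```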
- """ - root = Path(root) - if root.is_file(): - meta = load_audio_meta(root, resolve=True) - else: - meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True) - return cls(meta, **kwargs) - - -def main(): - logging.basicConfig(stream=sys.stderr, level=logging.INFO) - parser = argparse.ArgumentParser( - prog='audio_dataset', - description='Generate .jsonl files by scanning a folder.') - parser.add_argument('root', help='Root folder with all the audio files') - parser.add_argument('output_meta_file', - help='Output file to store the metadata, ') - parser.add_argument('--complete', - action='store_false', dest='minimal', default=True, - help='Retrieve all metadata, even the one that are expansive ' - 'to compute (e.g. normalization).') - parser.add_argument('--resolve', - action='store_true', default=False, - help='Resolve the paths to be absolute and with no symlinks.') - parser.add_argument('--workers', - default=10, type=int, - help='Number of workers.') - args = parser.parse_args() - meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True, - resolve=args.resolve, minimal=args.minimal, workers=args.workers) - save_audio_meta(args.output_meta_file, meta) - - -if __name__ == '__main__': - main() diff --git a/spaces/Awiny/Image2Paragraph/models/segment_models/configs/ade20k_id2label.py b/spaces/Awiny/Image2Paragraph/models/segment_models/configs/ade20k_id2label.py deleted file mode 100644 index 2057b7ce4d5ab44d989bdf882bc6c6a35dcb8a70..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/segment_models/configs/ade20k_id2label.py +++ /dev/null @@ -1,153 +0,0 @@ -CONFIG = { - "id2label": { - "0": "wall", - "1": "building", - "2": "sky", - "3": "floor", - "4": "tree", - "5": "ceiling", - "6": "road", - "7": "bed ", - "8": "windowpane", - "9": "grass", - "10": "cabinet", - "11": "sidewalk", - "12": "person", - "13": "earth", - "14": "door", - "15": "table", - "16": "mountain", - "17": "plant", - "18": "curtain", - "19": "chair", - "20": "car", - "21": "water", - "22": "painting", - "23": "sofa", - "24": "shelf", - "25": "house", - "26": "sea", - "27": "mirror", - "28": "rug", - "29": "field", - "30": "armchair", - "31": "seat", - "32": "fence", - "33": "desk", - "34": "rock", - "35": "wardrobe", - "36": "lamp", - "37": "bathtub", - "38": "railing", - "39": "cushion", - "40": "base", - "41": "box", - "42": "column", - "43": "signboard", - "44": "chest of drawers", - "45": "counter", - "46": "sand", - "47": "sink", - "48": "skyscraper", - "49": "fireplace", - "50": "refrigerator", - "51": "grandstand", - "52": "path", - "53": "stairs", - "54": "runway", - "55": "case", - "56": "pool table", - "57": "pillow", - "58": "screen door", - "59": "stairway", - "60": "river", - "61": "bridge", - "62": "bookcase", - "63": "blind", - "64": "coffee table", - "65": "toilet", - "66": "flower", - "67": "book", - "68": "hill", - "69": "bench", - "70": "countertop", - "71": "stove", - "72": "palm", - "73": "kitchen island", - "74": "computer", - "75": "swivel chair", - "76": "boat", - "77": "bar", - "78": "arcade machine", - "79": "hovel", - "80": "bus", - "81": "towel", - "82": "light", - "83": "truck", - "84": "tower", - "85": "chandelier", - "86": "awning", - "87": "streetlight", - "88": "booth", - "89": "television receiver", - "90": "airplane", - "91": "dirt track", - "92": "apparel", - "93": "pole", - "94": "land", - "95": "bannister", - "96": "escalator", - "97": "ottoman", - "98": "bottle", - "99": "buffet", - "100": "poster", - "101": "stage", - "102": 
"van", - "103": "ship", - "104": "fountain", - "105": "conveyer belt", - "106": "canopy", - "107": "washer", - "108": "plaything", - "109": "swimming pool", - "110": "stool", - "111": "barrel", - "112": "basket", - "113": "waterfall", - "114": "tent", - "115": "bag", - "116": "minibike", - "117": "cradle", - "118": "oven", - "119": "ball", - "120": "food", - "121": "step", - "122": "tank", - "123": "trade name", - "124": "microwave", - "125": "pot", - "126": "animal", - "127": "bicycle", - "128": "lake", - "129": "dishwasher", - "130": "screen", - "131": "blanket", - "132": "sculpture", - "133": "hood", - "134": "sconce", - "135": "vase", - "136": "traffic light", - "137": "tray", - "138": "ashcan", - "139": "fan", - "140": "pier", - "141": "crt screen", - "142": "plate", - "143": "monitor", - "144": "bulletin board", - "145": "shower", - "146": "radiator", - "147": "glass", - "148": "clock", - "149": "flag"} -} diff --git a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py b/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py deleted file mode 100644 index 73a5b836177b706c306e27875f8391c1aed4b948..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - 
aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Benson/text-generation/Examples/Bitcoin Bit Generador De Bitcoin Apk.md b/spaces/Benson/text-generation/Examples/Bitcoin Bit Generador De Bitcoin Apk.md deleted file mode 100644 index 510bc326645d68ac6470fb4c6a1176df813ae61a..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Bitcoin Bit Generador De Bitcoin Apk.md +++ /dev/null @@ -1,63 +0,0 @@ - -

    Cómo descargar Minecraft 1.13.1 APK gratis

    -

    Minecraft es uno de los juegos más populares y creativos del mundo. Te permite crear tu propio mundo virtual con bloques, explorar diferentes biomas, luchar contra monstruos e interactuar con otros jugadores. Ya sea que quieras construir un castillo, una nave espacial o una obra maestra de arte de píxeles, Minecraft te permite liberar tu imaginación y divertirte.

    -

    Si desea disfrutar de las últimas características y actualizaciones de Minecraft, es necesario descargar la versión más reciente del juego. En este artículo, le mostraremos cómo descargar Minecraft 1.13.1 APK gratis en su dispositivo Android. Esta versión incluye algunas adiciones emocionantes como zorros, vacas pardas, bloques de estructura y rosas marchitas.

    -

    bitcoin bit generador de bitcoin apk


    DOWNLOAD ✯✯✯ https://bltlly.com/2v6L8R



    -

    ¿Qué hay de nuevo en Minecraft 1.13.1?

    -

    Minecraft 1.13.1 es la última actualización de la edición Bedrock del juego, que es compatible con dispositivos Android. Fue lanzado el 2 de octubre de 2021, y trae algunas mejoras en el rendimiento y la estabilidad, así como algunas nuevas características y elementos. Estos son algunos de los aspectos más destacados de esta actualización:

    -

    Zorros

    -

    Los zorros son animales lindos y peludos que se pueden encontrar en la taiga, árboles gigantes y biomas de nieve. Son cazadores nocturnos que se alimentan de pollos, conejos, peces y bayas. También son muy rápidos y pueden saltar sobre cercas y paredes.

    -

    Si quieres domar a un zorro, necesitas encontrar dos zorros adultos que tengan bayas dulces en la boca. Puedes usar una guía para unirlos y criarlos con bayas más dulces. El zorro bebé que nace confiará en ti y te seguirá. También puedes darle una etiqueta o un collar para que sea más leal.

    -

    Vacas pardas

    -

    Las vacas pardas son una nueva variante de vacas de hongos que se puede obtener por un evento raro. Parecen vacas normales pero con hongos marrones en la espalda. Pueden proporcionar estofado de hongos o estofado sospechoso cuando se cortan o ordeñan con un tazón.

    - -

    Bloques de estructura

    -

    Los bloques de estructura son bloques especiales que se pueden usar para crear y copiar estructuras en el juego. Son útiles para los creadores de mapas y constructores que quieren ahorrar tiempo y recursos. Solo se pueden obtener en modo creativo mediante el comando /give @s structure_block.

    -

    Para usar un bloque de estructura, debe colocarlo en el suelo y abrir su interfaz haciendo clic derecho sobre él . Hay cuatro modos de bloques de estructura: Guardar, Cargar, Esquina y Datos. Puede usar el modo Guardar para guardar una estructura en un archivo, el modo Cargar para cargar una estructura desde un archivo, el modo Esquina para definir los límites de una estructura y el modo Datos para agregar datos personalizados a una estructura.

    -

    Rosa marchita

    -

    La rosa marchita es un nuevo tipo de flor que puede infligir el efecto marchita en cualquier entidad viviente que la toque. Tiene un color negro y un patrón calavera en sus pétalos. Se puede utilizar para elaborar tinte negro o estofado sospechoso.

    -

    Para obtener una rosa marchita, necesitas hacer que el Wither (el jefe marchito) mate a otra criatura: siempre aparece una rosa marchita en el lugar donde muere la víctima. También puedes utilizar un dispensador con tijeras para recoger la flor sin hacerte daño.

    -

    -

    ¿Cómo descargar Minecraft 1.13.1 APK gratis?

    -

    Ahora que sabes lo que hay de nuevo en Minecraft 1.13.1, es posible que se pregunte cómo descargarlo de forma gratuita en su dispositivo Android. Bueno, no es tan difícil como podrías pensar. Sigue estos sencillos pasos y jugarás en poco tiempo:

    -

    Paso 1: Compruebe la compatibilidad de su dispositivo y el espacio de almacenamiento

    -

    Antes de descargar nada, debe asegurarse de que su dispositivo cumple con los requisitos mínimos para ejecutar el juego sin problemas. Según el sitio web oficial, necesita al menos:

    -
      -
    • Un dispositivo Android con la versión 4.2 o superior
    • -
    • Un procesador con arquitectura ARMv7 o x86
    • -
    • Al menos 1 GB de RAM
    • -
    • Al menos 300 MB de espacio de almacenamiento libre
    • -
    - -

    Paso 2: Elija una fuente confiable para descargar el archivo APK

    -

    Un archivo APK es un paquete de aplicaciones de Android que contiene todos los archivos y datos necesarios para instalar una aplicación en su dispositivo. Puede descargar archivos APK de varias fuentes en Internet, pero no todos ellos son seguros y confiables. Algunos de ellos pueden contener virus, malware o anuncios no deseados que pueden dañar tu dispositivo o comprometer tu privacidad.

    -

    Para evitar estos riesgos, debe elegir una fuente confiable que ofrece archivos APK verificados y seguros. Una de las mejores fuentes que recomendamos es APKPure, que es un sitio web popular y de buena reputación que proporciona archivos APK gratuitos y actualizados para varias aplicaciones y juegos. Puede acceder a su sitio web desde cualquier navegador de su dispositivo o descargar su aplicación para facilitar el acceso.

    -

    Paso 3: Habilitar fuentes desconocidas en la configuración del dispositivo

    -

    Por defecto, tu dispositivo solo te permite instalar aplicaciones desde Google Play Store, que es la tienda de aplicaciones oficial para dispositivos Android. Sin embargo, si desea instalar un archivo APK desde otra fuente, debe habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Play Store.

    -

    Para habilitar fuentes desconocidas, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Entonces, encontrar la opción que dice fuentes desconocidas o permitir la instalación de aplicaciones de fuentes desconocidas y cambiarlo por. Es posible que vea un mensaje de advertencia que le informa sobre los riesgos potenciales de instalar aplicaciones desde fuentes desconocidas. Pulse Aceptar o Continuar.

    -

    Paso 4: Instalar el archivo APK y lanzar el juego

    -

    Ahora que ha habilitado fuentes desconocidas, puede instalar el archivo APK en su dispositivo. Para hacer esto, vaya al sitio web o aplicación donde descargó el archivo APK y toque en él. Puede ver una ventana emergente que le pregunta si desea instalar esta aplicación. Pulse Instalar y espere a que finalice el proceso de instalación.

    - -

    Felicidades! Usted ha descargado e instalado con éxito Minecraft 1.13.1 APK gratis en su dispositivo Android. Ahora puedes disfrutar de este increíble juego con todas sus nuevas características y actualizaciones.

    -

    ¿Cómo jugar Minecraft 1.13.1?

    -

    Si eres nuevo en Minecraft o necesitas algunas actualizaciones sobre cómo jugarlo, aquí hay algunos consejos e instrucciones básicas sobre cómo jugar Minecraft 1.13.1:

    -

    Elige un modo de juego: Supervivencia, Creativo, o Aventura

    -

    Minecraft tiene tres modos de juego principales que ofrecen diferentes experiencias y desafíos. Puedes elegir el modo de juego que se adapte a tu preferencia y estilo de juego.

    -
      -
    • Modo de supervivencia: En este modo, tienes que sobrevivir en un mundo generado aleatoriamente con recursos y salud limitados. Tienes que reunir materiales, herramientas y armas, construir refugios y defenderte de los enemigos. También tienes que lidiar con el hambre, la sed y los peligros ambientales. Puedes ajustar el nivel de dificultad de pacífico a duro, o jugar en el modo hardcore donde solo tienes una vida.
    • -
    • Modo creativo: En este modo, tienes recursos y salud ilimitados, y puedes volar alrededor del mundo. Puedes crear lo que quieras sin restricciones ni peligros. También puede usar comandos y trucos para modificar el mundo y generar elementos y entidades. Este modo es ideal para construir, experimentar y explorar.
    • -
    • Modo aventura: En este modo, puede jugar mapas personalizados y escenarios creados por otros jugadores o usted mismo. Tienes que seguir las reglas y objetivos establecidos por el creador del mapa, como resolver puzzles, completar misiones o luchar contra jefes. También puedes usar bloques de comandos y paquetes de datos para agregar características y mecánicas personalizadas al juego.
    • -
    -

    Crear o unirse a un mundo: Un jugador o multijugador

    - -

    Para crear un mundo, necesitas elegir un nombre para tu mundo, seleccionar un modo de juego y personalizar algunas opciones como la semilla, el tipo de mundo, los trucos y el cofre de bonos. También puedes usar complementos o paquetes de recursos para cambiar la apariencia y el comportamiento del juego.

    -

    Para unirte a un mundo, necesitas encontrar un servidor que aloje el mundo en el que quieres jugar. Puede unirse a un servidor público al que cualquiera puede acceder, o a un servidor privado que requiere una invitación o una contraseña. También puedes unirte a un reino que es un servicio basado en suscripción que te permite crear y unir mundos que siempre están en línea.

    -

    Explorar, construir y crear: Utilice su imaginación y habilidades

    -

    Una vez que estás en un mundo, puedes empezar a jugar al juego explorando, construyendo y creando. Puedes moverte alrededor del mundo caminando, corriendo, saltando, nadando, volando o montando vehículos o animales. Puedes interactuar con el mundo rompiendo y colocando bloques, usando objetos y herramientas, activando interruptores y palancas, comerciando con aldeanos y luchando contra enemigos.

    -

    Puedes construir lo que quieras usando bloques de diferentes materiales, formas, colores y propiedades. También puede utilizar circuitos redstone para crear mecanismos complejos como puertas, trampas, ascensores y máquinas. También puede usar comandos y funciones para crear estructuras y efectos personalizados.

    -

    Puede crear varios elementos y herramientas mediante el uso de una tabla de elaboración o una cuadrícula de inventario. Es necesario organizar los materiales en patrones específicos para crear diferentes productos como armas, armaduras, alimentos de donde lo descargó e instalarlo sobre el existente. Es posible que necesite desinstalar la versión anterior primero si encuentra algún problema.

  10. -
  11. Q: ¿Cuáles son algunos otros juegos como Minecraft que puedo jugar en mi dispositivo Android?
  12. -
  13. A: Algunos otros juegos como Minecraft que se puede jugar en su dispositivo Android son Terraria, Roblox, Stardew Valley, y Survivalcraft.
  14. -

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md b/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md deleted file mode 100644 index dc6d7ac91ee3a6f0d067057147cf1308b2e2c1e4..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md +++ /dev/null @@ -1,116 +0,0 @@ - -

    El increíble Spider-Man 2 APK: Una revisión

    -

    Si usted es un fan de Spider-Man y sus increíbles aventuras, es posible que desee echa un vistazo a The Amazing Spider-Man 2 APK, un juego que le permite convertirse en el web-slinger sí mismo y salvar a Nueva York de una juerga de crimen en toda la ciudad. En este artículo, revisaremos este juego y te diremos por qué deberías descargarlo, qué características ofrece, cómo instalarlo en tu dispositivo Android y algunos consejos y trucos para disfrutarlo más. ¡Vamos a empezar!

    -

    bowmasters mod apk gamedva


    DOWNLOADhttps://bltlly.com/2v6J17



    -

    Introducción

    -

    ¿Qué es el increíble Spider-Man 2 APK?

    -

    El increíble Spider-Man 2 APK es un juego para Android que se basa en la película de Marvel del mismo nombre. Es desarrollado por Gameloft, una compañía líder en la industria de juegos móviles. El juego es una aventura llena de acción y mundo abierto que te permite columpiarte, disparar en la web, escalar paredes y luchar como Spider-Man en un entorno 3D realista. Puedes explorar seis distritos detallados de Manhattan, desde Times Square hasta Central Park, y enfrentarte a villanos famosos como Venom, el Duende Verde, Electro y Kraven el Cazador. También puedes desbloquear diferentes trajes de Spider-Man, como Symbiote Spider-Man, Iron Spider y Ultimate Comics Spider-Man, cada uno con sus propias habilidades y bonificaciones.

    -

    ¿Por qué debería descargarlo?

    -

    Hay muchas razones por las que debe descargar The Amazing Spider-Man 2 APK en su dispositivo Android. Estos son algunos de ellos:

    - - -

    Una historia original basada en la película

    -

    El increíble Spider-Man 2 APK sigue los eventos de la película, pero también añade nuevos giros y vueltas para hacerlo más interesante. Te encontrarás con nuevos personajes como Gato Negro y Screwball, que te ayudarán u obstaculizarán en tu búsqueda para detener la ola de crímenes. También descubrirás más sobre los orígenes de los villanos y sus motivos. El juego tiene alta calidad de actuación de voz y escenas cinematográficas que te sumergirán en la historia.

    -

    Una impresionante aventura en 3D de mundo abierto

    -

    El increíble Spider-Man 2 APK le da la libertad de explorar Nueva York como desee. Puede oscilar de un edificio a otro, escalar paredes, gatear sobre techos y saltar obstáculos. También puede interactuar con el entorno, como romper ventanas, destrozar autos o salvar civiles. El juego tiene la física realista y los efectos del clima dinámico que hacen que la ciudad cobre vida. También puede disfrutar de las hermosas vistas del horizonte, los puentes y los monumentos.

    -

    -

    Una variedad de trajes y villanos de Spider-Man

    -

    El increíble Spider-Man 2 APK le permite personalizar su Spider-Man con diferentes trajes que tienen diferentes poderes y bonos. Puedes desbloquearlos completando misiones, recogiendo objetos o comprándolos con dinero real. Algunos de los trajes son:

    -
    | Traje | Poder | Bono |
    | --- | --- | --- |
    | Symbiote Spider-Man | Venom Blast: aturde a los enemigos e inflige daño extra | Aumenta la regeneración de la salud |
    | Iron Spider | Iron Arms: invoca cuatro brazos mecánicos que atacan a los enemigos | Aumenta el poder de ataque y la defensa |
    | Ultimate Comics Spider-Man | Cloaking: se vuelve invisible e indetectable por los enemigos | Aumenta el sigilo y la agilidad |
    | Spider-Man 2099 | Visión acelerada: ralentiza el tiempo y mejora la percepción | Aumenta la velocidad y los reflejos |
    | Araña escarlata | Nanobots: cura heridas y restaura la salud | Aumenta la curación y la resistencia |
    | Spider-Armor MK II | Bulletproof: absorbe y refleja las balas en los enemigos | Aumenta la armadura y la protección |
    | The Amazing Spider-Man (2014) | Ningún poder especial | Ningún bono especial |

    Nota: Algunos trajes requieren compras en la aplicación para desbloquear.
    -

    El juego también cuenta con una amplia gama de villanos que tendrás que enfrentar en diferentes misiones y batallas contra jefes. Algunos de los villanos son:

    - -

    Un emocionante sistema de combate y acción aérea

    -

    El increíble Spider-Man 2 APK tiene un sistema de combate que es de ritmo rápido, fluido y sensible. Puedes usar tus telarañas para balancear, comprimir, tirar o envolver a tus enemigos. También puedes usar tus puños, patadas o disparadores web para combatirlos. Usted puede realizar combos, contadores, esquiva, finishers, y movimientos especiales para derrotar a sus enemigos. También puede utilizar el entorno a su favor, como lanzar objetos, romper paredes o provocar explosiones.

    -

    El juego también tiene un sistema de acción aérea que te permite volar por el cielo como Spider-Man. Puedes usar tus telarañas para balancearte de un edificio a otro, o deslizarte usando tus alas de telaraña. También puede realizar maniobras acrobáticas, como volteretas, giros, inmersiones y rollos. También puedes participar en combates aéreos con enemigos que vuelan o te disparan.

    -

    Cómo

    Cómo descargar e instalar The Amazing Spider-Man 2 APK

    -

    Requisitos y compatibilidad

    -

    El increíble Spider-Man 2 APK es un juego grande que requiere mucho espacio y recursos en su dispositivo Android. Estos son los requisitos mínimos y la compatibilidad para el juego:

    - -

    Pasos para descargar e instalar

    -

    Para descargar e instalar el increíble Spider-Man 2 APK en su dispositivo Android, es necesario seguir estos pasos:

    -
      -
    1. Descargue el archivo APK y el archivo de datos OBB desde una fuente de confianza, como APKPure o APKMirror. Asegúrate de descargar los archivos que coincidan con las especificaciones y la región de tu dispositivo.
    2. -
    3. Habilite la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo.
    4. -
    5. Busque el archivo APK descargado y toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla para completar la instalación.
    6. -
    7. Extraiga el archivo de datos OBB utilizando una aplicación de administrador de archivos, como ES File Explorer o ZArchiver. Usted debe obtener una carpeta llamada com.gameloft.android.ANMP.GloftASHM.
    8. -
    9. Mueva la carpeta al directorio Android/OBB en el almacenamiento interno de su dispositivo. Si no tiene una carpeta OBB, cree una.
    10. -
    11. Iniciar el juego desde el cajón de la aplicación y disfrutar!
    12. -
    -

    Consejos y trucos para disfrutar del juego

    -

    El increíble Spider-Man 2 APK es un juego divertido y desafiante que pondrá a prueba sus habilidades y reflejos como Spider-Man. Aquí hay algunos consejos y trucos para ayudarle a disfrutar del juego más:

    - -

    Conclusión

    -

    Resumen de los puntos principales

    -

    El increíble Spider-Man 2 APK es un juego para Android que le permite convertirse en Spider-Man y salvar a Nueva York de una juerga de crimen en toda la ciudad. Se basa en la película de Marvel del mismo nombre, pero también tiene una historia original que introduce nuevos personajes y escenarios. Tiene gráficos y animaciones de alta calidad que te hacen sentir como si estuvieras en la película. Tiene una variedad de características que lo hacen divertido y emocionante, como diferentes trajes y villanos de Spider-Man, una aventura en 3D de mundo abierto, un sistema de combate emocionante y acción aérea, un aspecto social que le permite competir con otros jugadores en línea y más. Es fácil de descargar e instalar en su dispositivo Android, siempre y cuando cumpla con los requisitos y la compatibilidad. También tiene algunos consejos y trucos que te ayudarán a disfrutar más del juego.

    -

    Llamada a la acción y pensamientos finales

    - -

    Aquí hay algunas preguntas frecuentes sobre The Amazing Spider-Man 2 APK:

    -
      -
    1. ¿Es el increíble Spider-Man 2 APK seguro para descargar e instalar?
    2. -

      Sí, El Amazing Spider-Man 2 APK es seguro para descargar e instalar, siempre y cuando lo obtenga de una fuente de confianza, como APKPure o APKMirror. Estas fuentes escanean los archivos en busca de virus y malware antes de cargarlos. Sin embargo, siempre debes tener cuidado al descargar e instalar aplicaciones de fuentes desconocidas, ya que pueden contener contenido dañino o no deseado.

      -
    3. ¿Es el increíble Spider-Man 2 APK libre para jugar?
    4. -

      El increíble Spider-Man 2 APK es gratis para descargar y jugar, pero también tiene algunas compras en la aplicación que le permiten comprar artículos adicionales, tales como fichas de araña, viales, trajes, o gadgets. Estas compras son opcionales y no son necesarias para disfrutar del juego. También puedes ganar estos objetos jugando el juego y completando misiones.

      -
    5. ¿Cómo actualizo el increíble Spider-Man 2 APK?
    6. -

      El increíble Spider-Man 2 APK se actualiza regularmente por los desarrolladores para corregir errores, mejorar el rendimiento, y añadir nuevas características. Puedes actualizar el juego descargando e instalando la última versión desde la misma fuente donde obtuviste la original. También puedes buscar actualizaciones dentro del juego en Configuración > Acerca de > Buscar actualizaciones.

      -
    7. ¿Cómo puedo desinstalar el increíble Spider-Man 2 APK?
    8. -

      Si desea desinstalar The Amazing Spider-Man 2 APK desde su dispositivo Android, puede hacerlo siguiendo estos pasos:

      -
        -
      • Ir a Configuración > Aplicaciones > El increíble Spider-Man 2.
      • -
      • Toque en Desinstalar y confirmar su elección.
      • -
      • Elimina la carpeta com.gameloft.android.ANMP.GloftASHM de tu directorio Android/OBB.
      • -
      -
    9. ¿Cómo me pongo en contacto con los desarrolladores de The Amazing Spider-Man 2 APK?
    10. -

      Si usted tiene alguna pregunta, retroalimentación, o problemas con respecto a The Amazing Spider-Man 2 APK, puede ponerse en contacto con los desarrolladores mediante el uso de uno de estos métodos:

      -
        - -Sitio web: https://www.gameloft.com/en/game/the-amazing-spider-man-2 -
      • Facebook: https://www.facebook.com/TheAmazingSpiderManGame
      • -
      • Twitter: https://twitter.com/gameloft
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md b/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md deleted file mode 100644 index 40380296c3dbc17d502f4fe58afb3c55573f7da7..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md +++ /dev/null @@ -1,78 +0,0 @@ - -

      Cómo Descargar Yu-Gi-Oh Duel Links en Laptop

      -

      ¿Te encanta jugar Yu-Gi-Oh Duel Links en tu dispositivo móvil, pero te gustaría poder disfrutarlo en una pantalla más grande y con mejores controles? Si es así, estás de suerte, porque en este artículo, te mostraremos cómo descargar Yu-Gi-Oh Duel Links en la computadora portátil y jugarlo como un profesional. Si usted tiene Windows 11, Windows 10, o una versión anterior de Windows, tenemos una solución para usted. Así que, vamos a empezar!

      -

      cómo descargar yu gi oh duel links en laptop


      Download File ✫✫✫ https://bltlly.com/2v6LC2



      -

      ¿Qué es Yu-Gi-Oh Duel Links?

      -

      Yu-Gi-Oh Duel Links es un popular juego de cartas multijugador en línea basado en la serie de anime y manga Yu-Gi-Oh. Te permite construir tu propia baraja de cartas de cientos de personajes y monstruos, y batirte en duelo contra otros jugadores de todo el mundo. También puedes desafiar a duelistas legendarios del anime, como Yugi, Kaiba, Joey, Mai y más. El juego cuenta con impresionantes animaciones en 3D, actuación de voz y controles intuitivos que hacen que sea fácil de aprender y divertido de jugar.

      -

      ¿Por qué jugar Yu-Gi-Oh Duel Links en el ordenador portátil?

      -

      Si bien Yu-Gi-Oh Duel Links está diseñado para dispositivos móviles, hay muchas razones por las que es posible que desee jugar en su computadora portátil en su lugar. Estos son algunos de ellos:

      -
        -
      • Puedes disfrutar de los gráficos y animaciones del juego en una pantalla más grande y de mayor resolución.
      • -
      • Puedes usar el teclado y el ratón para controlar el juego con mayor precisión y comodidad.
      • -
      • Puede ahorrar la vida de la batería y el uso de datos jugando el juego sin conexión o a través de Wi-Fi.
      • -
      • Puede evitar interrupciones de llamadas telefónicas, mensajes, notificaciones o alertas de batería baja.
      • -
      • Puede acceder a más funciones y opciones que pueden no estar disponibles en la versión móvil.
      • -
      -

      Entonces, ¿cómo se descarga Yu-Gi-Oh Duel Links en el ordenador portátil? Bueno, hay diferentes métodos dependiendo de qué versión de Windows que tiene. Echemos un vistazo a cada uno.

      -

      -

      ¿Cómo descargar Yu-Gi-Oh Duel Links en el ordenador portátil con Windows 11?

      - -
        -
      1. Asegúrese de que su PC con Windows 11 tenga habilitada la virtualización de hardware. Puede verificar esto yendo a la pestaña Administrador de tareas > Rendimiento. Si no, es posible que necesite habilitarlo en la configuración de su BIOS.
      2. -
      3. Asegúrese de que su PC con Windows 11 está actualizado a la última versión. Puede comprobar esto yendo a Configuración > Actualización y seguridad > Actualización de Windows.
      4. -
      5. Descargar e instalar la aplicación Amazon Appstore en su PC con Windows 11.
      6. -
      7. Inicie la aplicación Amazon Appstore e inicie sesión con su cuenta de Amazon. Si no tiene una, puede crear una gratis.
      8. -
      9. Busque "Yu-Gi-Oh Duel Links" o haga clic en este enlace para ir a la página del juego.
      10. -
      11. Haga clic en el botón "Obtener" y espere a que el juego se descargue e instale.
      12. -
      13. Iniciar el juego desde la aplicación Amazon Appstore o desde el menú Inicio.
      14. -
      15. ¡Disfruta jugando Yu-Gi-Oh Duel Links en tu laptop!
      16. -
      -

      ¡Eso es todo! Ahora puedes jugar Yu-Gi-Oh Duel Links en tu portátil con Windows 11 usando el subsistema de Windows para Android y la Appstore de Amazon. Este método es rápido, fácil y seguro, y no requiere ningún software o configuración de terceros. Sin embargo, si tiene Windows 10 o una versión anterior de Windows, necesitará usar un método diferente.

      -

      ¿Cómo descargar Yu-Gi-Oh Duel Links en Laptop con Windows 10 o más?

      -

      Si tienes Windows 10 o una versión anterior de Windows, todavía puedes descargar Yu-Gi-Oh Duel Links en tu portátil usando un emulador de Android. Un emulador de Android es un software que simula un dispositivo Android en su PC, lo que le permite ejecutar aplicaciones y juegos de Android. Hay muchos emuladores de Android disponibles, pero uno de los más populares y confiables es Bluestacks. He aquí cómo utilizar Bluestacks para descargar Yu-Gi-Oh Duel Links en su ordenador portátil:

      -
        -
      1. Ir al sitio web oficial de Bluestacks, descargar e instalar el emulador en su portátil, iniciar sesión con su cuenta de Google y, desde la Play Store dentro de Bluestacks, buscar e instalar "Yu-Gi-Oh Duel Links".
      2. Iniciar el juego desde la pantalla de inicio de Bluestacks o desde el cajón de la aplicación.
      3. -
      4. ¡Disfruta jugando Yu-Gi-Oh Duel Links en tu laptop!
      5. -
      -

      Así es como puedes descargar Yu-Gi-Oh Duel Links en tu laptop con Windows 10 o más usando Bluestacks. Este método es simple y conveniente, pero puede requerir algunos recursos del sistema y espacio de almacenamiento. También es posible que tenga que ajustar algunos ajustes para optimizar su experiencia de juego y rendimiento. Si desea probar otro método, también puede sideload Yu-Gi-Oh Duel Links APK en su ordenador portátil.

      -

      ¿Cómo hacer sideload de Yu-Gi-Oh Duel Links APK en el ordenador portátil?

      -

      Sideloading es un proceso de transferencia e instalación de una aplicación desde una fuente distinta de la tienda de aplicaciones oficial. En este caso, puede sideload Yu-Gi-Oh Duel Links APK en su ordenador portátil desde su dispositivo Android. Este método puede ser útil si desea jugar el juego sin conexión o si tiene problemas para acceder a la Google Play Store o la Appstore de Amazon. Sin embargo, este método también puede implicar algunos riesgos, como infección de malware o problemas de compatibilidad. Por lo tanto, recomendamos que solo descargue APK de fuentes confiables y los escanee con software antivirus antes de instalarlos. Aquí es cómo sideload Yu-Gi-Oh Duel Links APK en su ordenador portátil:

      -
        -
      1. En tu dispositivo Android, ve a Configuración > Aplicaciones y notificaciones > Yu-Gi-Oh Duel Links > Almacenamiento y caché > Borrar caché. Esto asegurará que usted tiene la última versión del juego sin ningún dato dañado.
      2. - -
      3. En su dispositivo Android, ir a una aplicación explorador de archivos y localizar el archivo APK de Yu-Gi-Oh Duel Links. El nombre del archivo debería ser algo así como "com.konami.duellinks.apk". Puede encontrarlo en el almacenamiento interno o la tarjeta SD en Android > datos > com.konami.duellinks > archivos > descargar.
      4. -
      5. Transfiera el archivo APK desde su dispositivo Android a su computadora portátil mediante un cable USB, Wi-Fi, correo electrónico o cualquier otro método que prefiera.
      6. -
      7. En su computadora portátil, vaya a la carpeta donde guardó el archivo APK y haga doble clic en él para instalarlo. Es posible que deba permitir la instalación de aplicaciones de fuentes desconocidas en su computadora portátil. Puede hacer esto yendo a Configuración > Aplicaciones > Aplicaciones y características > Elija dónde obtener aplicaciones y seleccione En cualquier lugar.
      8. -
      9. Inicie el juego desde el menú Inicio o desde el acceso directo del escritorio.
      10. -
      11. ¡Disfruta jugando Yu-Gi-Oh Duel Links en tu laptop!
      12. -
      -

      Así es como se puede sideload Yu-Gi-Oh Duel Links APK en su ordenador portátil desde su dispositivo Android. Este método es flexible e independiente, pero también puede ser arriesgado y complicado. Es posible que tenga que actualizar el archivo APK manualmente cada vez que hay una nueva versión del juego. También puede encontrar algunos errores o errores que pueden afectar su experiencia de juego y el rendimiento. Por lo tanto, le sugerimos que utilice este método solo como último recurso.

      -

      Consejos y trucos para jugar Yu-Gi-Oh Duel Links en el ordenador portátil

      -

      Ahora que sabes cómo descargar Yu-Gi-Oh Duel Links en tu portátil, es posible que quieras conocer algunos consejos y trucos para aprovechar al máximo tu experiencia de juego y rendimiento. Estos son algunos de ellos:

      -
        -
      • Ajusta la configuración del juego según las especificaciones y preferencias de tu portátil. Puedes acceder a la configuración del juego tocando el icono de engranaje en la esquina superior derecha de la pantalla. Puede cambiar la calidad gráfica, el volumen de sonido, el idioma de voz, las notificaciones y más.
      • - -
      • Usa gestos del ratón para realizar acciones más rápidas y fáciles. Puede ver y personalizar los gestos del ratón haciendo clic en el icono del ratón en la esquina inferior derecha de la pantalla. Puede asignar gestos para acciones como deslizar, tocar, arrastrar, etc.
      • -
      • Sincronizar el progreso del juego en todos los dispositivos con su ID de Konami o cuenta de Google Play Games. Puede hacer esto tocando el icono de transferencia de datos en la esquina superior izquierda de la pantalla. También puede hacer copias de seguridad y restaurar los datos del juego usando esta función.
      • -
      • Únete a una sala de duelos o crea la tuya propia para jugar con tus amigos u otros jugadores en línea. Puedes hacer esto tocando el icono de la sala de duelos en la esquina inferior izquierda de la pantalla. También puedes chatear con otros jugadores, enviar solicitudes de amistad e intercambiar cartas en la sala de duelos.
      • -
      -

      Estos son algunos de los consejos y trucos que pueden ayudarle a jugar Yu-Gi-Oh Duel Links en su ordenador portátil más suave y agradable. Por supuesto, hay muchas más cosas que puedes descubrir y aprender mientras juegas. ¡Así que no tengas miedo de experimentar y explorar!

      -

      Conclusión

      -

      En conclusión, Yu-Gi-Oh Duel Links es un fantástico juego de cartas que puedes jugar en tu portátil con diferentes métodos dependiendo de tu versión de Windows. Puede usar el subsistema de Windows para Android y la Appstore de Amazon si tiene Windows 11, o un emulador de Android como Bluestacks si tiene Windows 10 o más. También puede sideload Yu-Gi-Oh Duel Links APK en su ordenador portátil desde su dispositivo Android si desea probar otra opción. Cualquiera que sea el método que elijas, asegúrate de seguir nuestros consejos y trucos para optimizar tu experiencia de juego y rendimiento.

      -

      Así que, ¿qué estás esperando? Descargar Yu-Gi-Oh Duel Links en su ordenador portátil hoy y dar rienda suelta a sus habilidades de duelo! Y no te olvides de compartir este artículo con tus amigos que podrían estar interesados en jugar Yu-Gi-Oh Duel Links en sus portátiles también!

      -

      Preguntas frecuentes

      - -
        -
      1. Yu-Gi-Oh Duel Links es gratis para jugar?
        Sí, Yu-Gi-Oh Duel Links es gratis para jugar con compras en la aplicación. Puede descargar y jugar el juego sin gastar dinero, pero también puede comprar gemas, tarjetas, paquetes y otros artículos con dinero real si desea mejorar su experiencia de juego.
      2. -
      3. ¿Es seguro descargar Yu-Gi-Oh Duel Links?
        Sí, Yu-Gi-Oh Duel Links es seguro descargarlo siempre y cuando lo obtengas de una fuente confiable, como Google Play Store, Amazon Appstore o el sitio web oficial de Bluestacks. Sin embargo, si carga Yu -Gi-Oh Duel Links APK de una fuente desconocida, usted debe escanear con el software antivirus antes de instalarlo y tener cuidado de cualquier malware o problemas de compatibilidad.
      4. -
      5. ¿Puedo jugar Yu-Gi-Oh Duel Links sin conexión?
        No, Yu-Gi-Oh Duel Links requiere una conexión a Internet para jugar. Necesitas estar en línea para acceder a las funciones del juego, como duelos, eventos, actualizaciones, etc. Sin embargo, puedes jugar el juego a través de Wi-Fi o Ethernet en lugar de usar tus datos móviles si quieres ahorrar en el uso de tus datos y en la duración de la batería.
      6. -
      7. ¿Puedo jugar Yu-Gi-Oh Duel Links con un controlador?
        Sí, puedes jugar Yu-Gi-Oh Duel Links con un controlador si usas un emulador de Android como Bluestacks. Puede conectar su controlador a su computadora portátil a través de USB o Bluetooth y asignar los botones a las acciones del juego. También puede utilizar una aplicación de gamepad en su dispositivo Android para controlar el juego en su ordenador portátil.
      8. -
      9. ¿Puedo transferir mi cuenta de Yu-Gi-Oh Duel Links desde mi dispositivo móvil a mi computadora portátil?
        Sí, puedes transferir tu cuenta de Yu-Gi-Oh Duel Links desde tu dispositivo móvil a tu laptop usando tu ID de Konami o cuenta de Google Play Games. Puede hacer esto tocando el icono de transferencia de datos en la esquina superior izquierda de la pantalla y siguiendo las instrucciones. También puede hacer copias de seguridad y restaurar los datos del juego usando esta función.
      10. -
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py deleted file mode 100644 index fe61e8116b71e073351939ed7a499ee752398f1c..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py +++ /dev/null @@ -1,132 +0,0 @@ -import itertools -import logging -import os -import posixpath -import urllib.parse -from typing import List - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.models.index import PyPI -from pip._internal.utils.compat import has_tls -from pip._internal.utils.misc import normalize_path, redact_auth_from_url - -logger = logging.getLogger(__name__) - - -class SearchScope: - - """ - Encapsulates the locations that pip is configured to search. - """ - - __slots__ = ["find_links", "index_urls", "no_index"] - - @classmethod - def create( - cls, - find_links: List[str], - index_urls: List[str], - no_index: bool, - ) -> "SearchScope": - """ - Create a SearchScope object after normalizing the `find_links`. - """ - # Build find_links. If an argument starts with ~, it may be - # a local file relative to a home directory. So try normalizing - # it and if it exists, use the normalized version. - # This is deliberately conservative - it might be fine just to - # blindly normalize anything starting with a ~... - built_find_links: List[str] = [] - for link in find_links: - if link.startswith("~"): - new_link = normalize_path(link) - if os.path.exists(new_link): - link = new_link - built_find_links.append(link) - - # If we don't have TLS enabled, then WARN if anyplace we're looking - # relies on TLS. - if not has_tls(): - for link in itertools.chain(index_urls, built_find_links): - parsed = urllib.parse.urlparse(link) - if parsed.scheme == "https": - logger.warning( - "pip is configured with locations that require " - "TLS/SSL, however the ssl module in Python is not " - "available." - ) - break - - return cls( - find_links=built_find_links, - index_urls=index_urls, - no_index=no_index, - ) - - def __init__( - self, - find_links: List[str], - index_urls: List[str], - no_index: bool, - ) -> None: - self.find_links = find_links - self.index_urls = index_urls - self.no_index = no_index - - def get_formatted_locations(self) -> str: - lines = [] - redacted_index_urls = [] - if self.index_urls and self.index_urls != [PyPI.simple_url]: - for url in self.index_urls: - redacted_index_url = redact_auth_from_url(url) - - # Parse the URL - purl = urllib.parse.urlsplit(redacted_index_url) - - # URL is generally invalid if scheme and netloc is missing - # there are issues with Python and URL parsing, so this test - # is a bit crude. See bpo-20271, bpo-23505. 
Python doesn't - # always parse invalid URLs correctly - it should raise - # exceptions for malformed URLs - if not purl.scheme and not purl.netloc: - logger.warning( - 'The index url "%s" seems invalid, please provide a scheme.', - redacted_index_url, - ) - - redacted_index_urls.append(redacted_index_url) - - lines.append( - "Looking in indexes: {}".format(", ".join(redacted_index_urls)) - ) - - if self.find_links: - lines.append( - "Looking in links: {}".format( - ", ".join(redact_auth_from_url(url) for url in self.find_links) - ) - ) - return "\n".join(lines) - - def get_index_urls_locations(self, project_name: str) -> List[str]: - """Returns the locations found via self.index_urls - - Checks the url_name on the main (first in the list) index and - use this url_name to produce all locations - """ - - def mkurl_pypi_url(url: str) -> str: - loc = posixpath.join( - url, urllib.parse.quote(canonicalize_name(project_name)) - ) - # For maximum compatibility with easy_install, ensure the path - # ends in a trailing slash. Although this isn't in the spec - # (and PyPI can handle it without the slash) some other index - # implementations might break if they relied on easy_install's - # behavior. - if not loc.endswith("/"): - loc = loc + "/" - return loc - - return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py deleted file mode 100644 index a5dc12bdd63163c86f87ce4b5430cdb16d73769d..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Represents a wheel file and provides access to the various parts of the -name that have meaning. -""" -import re -from typing import Dict, Iterable, List - -from pip._vendor.packaging.tags import Tag - -from pip._internal.exceptions import InvalidWheelFilename - - -class Wheel: - """A wheel file""" - - wheel_file_re = re.compile( - r"""^(?P(?P[^\s-]+?)-(?P[^\s-]*?)) - ((-(?P\d[^-]*?))?-(?P[^\s-]+?)-(?P[^\s-]+?)-(?P[^\s-]+?) - \.whl|\.dist-info)$""", - re.VERBOSE, - ) - - def __init__(self, filename: str) -> None: - """ - :raises InvalidWheelFilename: when the filename is invalid for a wheel - """ - wheel_info = self.wheel_file_re.match(filename) - if not wheel_info: - raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.") - self.filename = filename - self.name = wheel_info.group("name").replace("_", "-") - # we'll assume "_" means "-" due to wheel naming scheme - # (https://github.com/pypa/pip/issues/1150) - self.version = wheel_info.group("ver").replace("_", "-") - self.build_tag = wheel_info.group("build") - self.pyversions = wheel_info.group("pyver").split(".") - self.abis = wheel_info.group("abi").split(".") - self.plats = wheel_info.group("plat").split(".") - - # All the tag combinations from this file - self.file_tags = { - Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats - } - - def get_formatted_file_tags(self) -> List[str]: - """Return the wheel's tags as a sorted list of strings.""" - return sorted(str(tag) for tag in self.file_tags) - - def support_index_min(self, tags: List[Tag]) -> int: - """Return the lowest index that one of the wheel's file_tag combinations - achieves in the given list of supported tags. - - For example, if there are 8 supported tags and one of the file tags - is first in the list, then return 0. 
- - :param tags: the PEP 425 tags to check the wheel against, in order - with most preferred first. - - :raises ValueError: If none of the wheel's file tags match one of - the supported tags. - """ - try: - return next(i for i, t in enumerate(tags) if t in self.file_tags) - except StopIteration: - raise ValueError() - - def find_most_preferred_tag( - self, tags: List[Tag], tag_to_priority: Dict[Tag, int] - ) -> int: - """Return the priority of the most preferred tag that one of the wheel's file - tag combinations achieves in the given list of supported tags using the given - tag_to_priority mapping, where lower priorities are more-preferred. - - This is used in place of support_index_min in some cases in order to avoid - an expensive linear scan of a large list of tags. - - :param tags: the PEP 425 tags to check the wheel against. - :param tag_to_priority: a mapping from tag to priority of that tag, where - lower is more preferred. - - :raises ValueError: If none of the wheel's file tags match one of - the supported tags. - """ - return min( - tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority - ) - - def supported(self, tags: Iterable[Tag]) -> bool: - """Return whether the wheel is compatible with one of the given tags. - - :param tags: the PEP 425 tags to check the wheel against. - """ - return not self.file_tags.isdisjoint(tags) diff --git a/spaces/BlitzEsports/TextToImage/html2canvas.js b/spaces/BlitzEsports/TextToImage/html2canvas.js deleted file mode 100644 index 96e2dc5707b1a584ff7b3b583aea7c6c18d4ea76..0000000000000000000000000000000000000000 --- a/spaces/BlitzEsports/TextToImage/html2canvas.js +++ /dev/null @@ -1,7756 +0,0 @@ -/*! - * html2canvas 1.4.1 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ -(function (global, factory) { - typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : - typeof define === 'function' && define.amd ? define(factory) : - (global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.html2canvas = factory()); -}(this, (function () { 'use strict'; - - /*! ***************************************************************************** - Copyright (c) Microsoft Corporation. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY - AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - PERFORMANCE OF THIS SOFTWARE. 
- ***************************************************************************** */
- /* global Reflect, Promise */
-
- var extendStatics = function(d, b) {
- extendStatics = Object.setPrototypeOf ||
- ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
- function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
- return extendStatics(d, b);
- };
-
- function __extends(d, b) {
- if (typeof b !== "function" && b !== null)
- throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
- extendStatics(d, b);
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
- }
-
- var __assign = function() {
- __assign = Object.assign || function __assign(t) {
- for (var s, i = 1, n = arguments.length; i < n; i++) {
- s = arguments[i];
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
- }
- return t;
- };
- return __assign.apply(this, arguments);
- };
-
- function __awaiter(thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- }
-
- function __generator(thisArg, body) {
- var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
- return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
- function verb(n) { return function (v) { return step([n, v]); }; }
- function step(op) {
- if (f) throw new TypeError("Generator is already executing.");
- while (_) try {
- if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
- if (y = 0, t) op = [op[0] & 2, t.value];
- switch (op[0]) {
- case 0: case 1: t = op; break;
- case 4: _.label++; return { value: op[1], done: false };
- case 5: _.label++; y = op[1]; op = [0]; continue;
- case 7: op = _.ops.pop(); _.trys.pop(); continue;
- default:
- if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
- if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
- if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
- if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
- if (t[2]) _.ops.pop();
- _.trys.pop(); continue;
- }
- op = body.call(thisArg, _);
- } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
- if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
- }
- }
-
- function __spreadArray(to, from, pack) {
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
- if (ar || !(i in from)) {
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
- ar[i] = from[i];
- }
- }
- return to.concat(ar || from);
- }
-
- var Bounds = /** @class */ (function () {
- function Bounds(left, top, width, height) {
- this.left = left;
- this.top = top;
- this.width = width;
- this.height = height;
- }
- Bounds.prototype.add = function (x, y, w, h) {
- return new Bounds(this.left + x, this.top + y, this.width + w, this.height + h);
- };
- Bounds.fromClientRect = function (context, clientRect) {
- return new Bounds(clientRect.left + context.windowBounds.left, clientRect.top + context.windowBounds.top, clientRect.width, clientRect.height);
- };
- Bounds.fromDOMRectList = function (context, domRectList) {
- var domRect = Array.from(domRectList).find(function (rect) { return rect.width !== 0; });
- return domRect
- ? new Bounds(domRect.left + context.windowBounds.left, domRect.top + context.windowBounds.top, domRect.width, domRect.height)
- : Bounds.EMPTY;
- };
- Bounds.EMPTY = new Bounds(0, 0, 0, 0);
- return Bounds;
- }());
- var parseBounds = function (context, node) {
- return Bounds.fromClientRect(context, node.getBoundingClientRect());
- };
- var parseDocumentSize = function (document) {
- var body = document.body;
- var documentElement = document.documentElement;
- if (!body || !documentElement) {
- throw new Error("Unable to get document size");
- }
- var width = Math.max(Math.max(body.scrollWidth, documentElement.scrollWidth), Math.max(body.offsetWidth, documentElement.offsetWidth), Math.max(body.clientWidth, documentElement.clientWidth));
- var height = Math.max(Math.max(body.scrollHeight, documentElement.scrollHeight), Math.max(body.offsetHeight, documentElement.offsetHeight), Math.max(body.clientHeight, documentElement.clientHeight));
- return new Bounds(0, 0, width, height);
- };
-
- /*
- * css-line-break 2.1.0
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var toCodePoints$1 = function (str) {
- var codePoints = [];
- var i = 0;
- var length = str.length;
- while (i < length) {
- var value = str.charCodeAt(i++);
- if (value >= 0xd800 && value <= 0xdbff && i < length) {
- var extra = str.charCodeAt(i++);
- if ((extra & 0xfc00) === 0xdc00) {
- codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000);
- }
- else {
- codePoints.push(value);
- i--;
- }
- }
- else {
- codePoints.push(value);
- }
- }
- return codePoints;
- };
- var fromCodePoint$1 = function () {
- var codePoints = [];
- for (var _i = 0; _i < arguments.length; _i++) {
- codePoints[_i] = arguments[_i];
- }
- if (String.fromCodePoint) {
- return String.fromCodePoint.apply(String, codePoints);
- }
- var length = codePoints.length;
- if (!length) {
- return '';
- }
- var codeUnits = [];
- var index = -1;
- var result = '';
- while (++index < length) {
- var codePoint = codePoints[index];
- if (codePoint <= 0xffff) {
- codeUnits.push(codePoint);
- }
- else {
- codePoint -= 0x10000;
- codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00);
- }
- if (index + 1 === length || codeUnits.length > 0x4000) {
- result += String.fromCharCode.apply(String, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- };
- var chars$2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$2 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$2 = 0; i$2 < chars$2.length; i$2++) {
- lookup$2[chars$2.charCodeAt(i$2)] = i$2;
- }
-
- /*
- * utrie 1.0.2
- * Copyright (c) 2022 Niklas von Hertzen
- * Released under MIT License
- */
- var chars$1$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
- // Use a lookup table to find the index.
- var lookup$1$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256);
- for (var i$1$1 = 0; i$1$1 < chars$1$1.length; i$1$1++) {
- lookup$1$1[chars$1$1.charCodeAt(i$1$1)] = i$1$1;
- }
- var decode$1 = function (base64) {
- var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4;
- if (base64[base64.length - 1] === '=') {
- bufferLength--;
- if (base64[base64.length - 2] === '=') {
- bufferLength--;
- }
- }
- var buffer = typeof ArrayBuffer !== 'undefined' &&
- typeof Uint8Array !== 'undefined' &&
- typeof Uint8Array.prototype.slice !== 'undefined'
- ? new ArrayBuffer(bufferLength)
- : new Array(bufferLength);
- var bytes = Array.isArray(buffer) ? buffer : new Uint8Array(buffer);
- for (i = 0; i < len; i += 4) {
- encoded1 = lookup$1$1[base64.charCodeAt(i)];
- encoded2 = lookup$1$1[base64.charCodeAt(i + 1)];
- encoded3 = lookup$1$1[base64.charCodeAt(i + 2)];
- encoded4 = lookup$1$1[base64.charCodeAt(i + 3)];
- bytes[p++] = (encoded1 << 2) | (encoded2 >> 4);
- bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2);
- bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63);
- }
- return buffer;
- };
- var polyUint16Array$1 = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 2) {
- bytes.push((buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
- var polyUint32Array$1 = function (buffer) {
- var length = buffer.length;
- var bytes = [];
- for (var i = 0; i < length; i += 4) {
- bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]);
- }
- return bytes;
- };
-
- /** Shift size for getting the index-2 table offset. */
- var UTRIE2_SHIFT_2$1 = 5;
- /** Shift size for getting the index-1 table offset. */
- var UTRIE2_SHIFT_1$1 = 6 + 5;
- /**
- * Shift size for shifting left the index array values.
- * Increases possible data size with 16-bit index values at the cost
- * of compactability.
- * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY.
- */
- var UTRIE2_INDEX_SHIFT$1 = 2;
- /**
- * Difference between the two shift sizes,
- * for getting an index-1 offset from an index-2 offset. 6=11-5
- */
- var UTRIE2_SHIFT_1_2$1 = UTRIE2_SHIFT_1$1 - UTRIE2_SHIFT_2$1;
- /**
- * The part of the index-2 table for U+D800..U+DBFF stores values for
- * lead surrogate code _units_ not code _points_.
- * Values for lead surrogate code _points_ are indexed with this portion of the table.
- * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.)
- */
- var UTRIE2_LSCP_INDEX_2_OFFSET$1 = 0x10000 >> UTRIE2_SHIFT_2$1;
- /** Number of entries in a data block. 32=0x20 */
- var UTRIE2_DATA_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_2$1;
- /** Mask for getting the lower bits for the in-data-block offset. */
- var UTRIE2_DATA_MASK$1 = UTRIE2_DATA_BLOCK_LENGTH$1 - 1;
- var UTRIE2_LSCP_INDEX_2_LENGTH$1 = 0x400 >> UTRIE2_SHIFT_2$1;
- /** Count the lengths of both BMP pieces. 2080=0x820 */
- var UTRIE2_INDEX_2_BMP_LENGTH$1 = UTRIE2_LSCP_INDEX_2_OFFSET$1 + UTRIE2_LSCP_INDEX_2_LENGTH$1;
- /**
- * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
- * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2.
- */
- var UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 = UTRIE2_INDEX_2_BMP_LENGTH$1;
- var UTRIE2_UTF8_2B_INDEX_2_LENGTH$1 = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */
- /**
- * The index-1 table, only used for supplementary code points, at offset 2112=0x840.
- * Variable length, for code points up to highStart, where the last single-value range starts.
- * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1.
- * (For 0x100000 supplementary code points U+10000..U+10ffff.)
- *
- * The part of the index-2 table for supplementary code points starts
- * after this index-1 table.
- *
- * Both the index-1 table and the following part of the index-2 table
- * are omitted completely if there is only BMP data.
- */
- var UTRIE2_INDEX_1_OFFSET$1 = UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 + UTRIE2_UTF8_2B_INDEX_2_LENGTH$1;
- /**
- * Number of index-1 entries for the BMP. 32=0x20
- * This part of the index-1 table is omitted from the serialized form.
- */
- var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 = 0x10000 >> UTRIE2_SHIFT_1$1;
- /** Number of entries in an index-2 block. 64=0x40 */
- var UTRIE2_INDEX_2_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_1_2$1;
- /** Mask for getting the lower bits for the in-index-2-block offset. */
- var UTRIE2_INDEX_2_MASK$1 = UTRIE2_INDEX_2_BLOCK_LENGTH$1 - 1;
- var slice16$1 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint16Array(Array.prototype.slice.call(view, start, end));
- };
- var slice32$1 = function (view, start, end) {
- if (view.slice) {
- return view.slice(start, end);
- }
- return new Uint32Array(Array.prototype.slice.call(view, start, end));
- };
- var createTrieFromBase64$1 = function (base64, _byteLength) {
- var buffer = decode$1(base64);
- var view32 = Array.isArray(buffer) ? polyUint32Array$1(buffer) : new Uint32Array(buffer);
- var view16 = Array.isArray(buffer) ? polyUint16Array$1(buffer) : new Uint16Array(buffer);
- var headerLength = 24;
- var index = slice16$1(view16, headerLength / 2, view32[4] / 2);
- var data = view32[5] === 2
- ? slice16$1(view16, (headerLength + view32[4]) / 2)
- : slice32$1(view32, Math.ceil((headerLength + view32[4]) / 4));
- return new Trie$1(view32[0], view32[1], view32[2], view32[3], index, data);
- };
- var Trie$1 = /** @class */ (function () {
- function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) {
- this.initialValue = initialValue;
- this.errorValue = errorValue;
- this.highStart = highStart;
- this.highValueIndex = highValueIndex;
- this.index = index;
- this.data = data;
- }
- /**
- * Get the value for a code point as stored in the Trie.
- *
- * @param codePoint the code point
- * @return the value
- */
- Trie.prototype.get = function (codePoint) {
- var ix;
- if (codePoint >= 0) {
- if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) {
- // Ordinary BMP code point, excluding leading surrogates.
- // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index.
- // 16 bit data is stored in the index array itself.
- ix = this.index[codePoint >> UTRIE2_SHIFT_2$1];
- ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1);
- return this.data[ix];
- }
- if (codePoint <= 0xffff) {
- // Lead Surrogate Code Point.
A Separate index section is stored for - // lead surrogate code units and code points. - // The main index has the code unit data. - // For this function, we need the code point data. - // Note: this expression could be refactored for slightly improved efficiency, but - // surrogate code points will be so rare in practice that it's not worth it. - ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET$1 + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2$1)]; - ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1); - return this.data[ix]; - } - if (codePoint < this.highStart) { - // Supplemental code point, use two-level lookup. - ix = UTRIE2_INDEX_1_OFFSET$1 - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 + (codePoint >> UTRIE2_SHIFT_1$1); - ix = this.index[ix]; - ix += (codePoint >> UTRIE2_SHIFT_2$1) & UTRIE2_INDEX_2_MASK$1; - ix = this.index[ix]; - ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1); - return this.data[ix]; - } - if (codePoint <= 0x10ffff) { - return this.data[this.highValueIndex]; - } - } - // Fall through. The code point is outside of the legal range of 0..0x10ffff. - return this.errorValue; - }; - return Trie; - }()); - - /* - * base64-arraybuffer 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars$3 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$3 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256); - for (var i$3 = 0; i$3 < chars$3.length; i$3++) { - lookup$3[chars$3.charCodeAt(i$3)] = i$3; - } - - var base64$1 = 'KwAAAAAAAAAACA4AUD0AADAgAAACAAAAAAAIABAAGABAAEgAUABYAGAAaABgAGgAYgBqAF8AZwBgAGgAcQB5AHUAfQCFAI0AlQCdAKIAqgCyALoAYABoAGAAaABgAGgAwgDKAGAAaADGAM4A0wDbAOEA6QDxAPkAAQEJAQ8BFwF1AH0AHAEkASwBNAE6AUIBQQFJAVEBWQFhAWgBcAF4ATAAgAGGAY4BlQGXAZ8BpwGvAbUBvQHFAc0B0wHbAeMB6wHxAfkBAQIJAvEBEQIZAiECKQIxAjgCQAJGAk4CVgJeAmQCbAJ0AnwCgQKJApECmQKgAqgCsAK4ArwCxAIwAMwC0wLbAjAA4wLrAvMC+AIAAwcDDwMwABcDHQMlAy0DNQN1AD0DQQNJA0kDSQNRA1EDVwNZA1kDdQB1AGEDdQBpA20DdQN1AHsDdQCBA4kDkQN1AHUAmQOhA3UAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AKYDrgN1AHUAtgO+A8YDzgPWAxcD3gPjA+sD8wN1AHUA+wMDBAkEdQANBBUEHQQlBCoEFwMyBDgEYABABBcDSARQBFgEYARoBDAAcAQzAXgEgASIBJAEdQCXBHUAnwSnBK4EtgS6BMIEyAR1AHUAdQB1AHUAdQCVANAEYABgAGAAYABgAGAAYABgANgEYADcBOQEYADsBPQE/AQEBQwFFAUcBSQFLAU0BWQEPAVEBUsFUwVbBWAAYgVgAGoFcgV6BYIFigWRBWAAmQWfBaYFYABgAGAAYABgAKoFYACxBbAFuQW6BcEFwQXHBcEFwQXPBdMF2wXjBeoF8gX6BQIGCgYSBhoGIgYqBjIGOgZgAD4GRgZMBmAAUwZaBmAAYABgAGAAYABgAGAAYABgAGAAYABgAGIGYABpBnAGYABgAGAAYABgAGAAYABgAGAAYAB4Bn8GhQZgAGAAYAB1AHcDFQSLBmAAYABgAJMGdQA9A3UAmwajBqsGqwaVALMGuwbDBjAAywbSBtIG1QbSBtIG0gbSBtIG0gbdBuMG6wbzBvsGAwcLBxMHAwcbByMHJwcsBywHMQcsB9IGOAdAB0gHTgfSBkgHVgfSBtIG0gbSBtIG0gbSBtIG0gbSBiwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdgAGAALAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBy
wHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdbB2MHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB2kH0gZwB64EdQB1AHUAdQB1AHUAdQB1AHUHfQdgAIUHjQd1AHUAlQedB2AAYAClB6sHYACzB7YHvgfGB3UAzgfWBzMB3gfmB1EB7gf1B/0HlQENAQUIDQh1ABUIHQglCBcDLQg1CD0IRQhNCEEDUwh1AHUAdQBbCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIcAh3CHoIMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIgggwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAALAcsBywHLAcsBywHLAcsBywHLAcsB4oILAcsB44I0gaWCJ4Ipgh1AHUAqgiyCHUAdQB1AHUAdQB1AHUAdQB1AHUAtwh8AXUAvwh1AMUIyQjRCNkI4AjoCHUAdQB1AO4I9gj+CAYJDgkTCS0HGwkjCYIIggiCCIIIggiCCIIIg
giCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiAAIAAAAFAAYABgAGIAXwBgAHEAdQBFAJUAogCyAKAAYABgAEIA4ABGANMA4QDxAMEBDwE1AFwBLAE6AQEBUQF4QkhCmEKoQrhCgAHIQsAB0MLAAcABwAHAAeDC6ABoAHDCwMMAAcABwAHAAdDDGMMAAcAB6MM4wwjDWMNow3jDaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAEjDqABWw6bDqABpg6gAaABoAHcDvwOPA+gAaABfA/8DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DpcPAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcAB9cPKwkyCToJMAB1AHUAdQBCCUoJTQl1AFUJXAljCWcJawkwADAAMAAwAHMJdQB2CX4JdQCECYoJjgmWCXUAngkwAGAAYABxAHUApgn3A64JtAl1ALkJdQDACTAAMAAwADAAdQB1AHUAdQB1AHUAdQB1AHUAowYNBMUIMAAwADAAMADICcsJ0wnZCRUE4QkwAOkJ8An4CTAAMAB1AAAKvwh1AAgKDwoXCh8KdQAwACcKLgp1ADYKqAmICT4KRgowADAAdQB1AE4KMAB1AFYKdQBeCnUAZQowADAAMAAwADAAMAAwADAAMAAVBHUAbQowADAAdQC5CXUKMAAwAHwBxAijBogEMgF9CoQKiASMCpQKmgqIBKIKqgquCogEDQG2Cr4KxgrLCjAAMADTCtsKCgHjCusK8Qr5CgELMAAwADAAMAB1AIsECQsRC3UANAEZCzAAMAAwADAAMAB1ACELKQswAHUANAExCzkLdQBBC0kLMABRC1kLMAAwADAAMAAwADAAdQBhCzAAMAAwAGAAYABpC3ELdwt/CzAAMACHC4sLkwubC58Lpwt1AK4Ltgt1APsDMAAwADAAMAAwADAAMAAwAL4LwwvLC9IL1wvdCzAAMADlC+kL8Qv5C/8LSQswADAAMAAwADAAMAAwADAAMAAHDDAAMAAwADAAMAAODBYMHgx1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1ACYMMAAwADAAdQB1AHUALgx1AHUAdQB1AHUAdQA2DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AD4MdQBGDHUAdQB1AHUAdQB1AEkMdQB1AHUAdQB1AFAMMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQBYDHUAdQB1AF8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUA+wMVBGcMMAAwAHwBbwx1AHcMfwyHDI8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAYABgAJcMMAAwADAAdQB1AJ8MlQClDDAAMACtDCwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB7UMLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AA0EMAC9DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAsBywHLAcsBywHLAcsBywHLQcwAMEMyAwsBywHLAcsBywHLAcsBywHLAcsBywHzAwwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1ANQM2QzhDDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMABgAGAAYABgAGAAYABgAOkMYADxDGAA+AwADQYNYABhCWAAYAAODTAAMAAwADAAFg1gAGAAHg37AzAAMAAwADAAYABgACYNYAAsDTQNPA1gAEMNPg1LDWAAYABgAGAAYABgAGAAYABgAGAAUg1aDYsGVglhDV0NcQBnDW0NdQ15DWAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAlQCBDZUAiA2PDZcNMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAnw2nDTAAMAAwADAAMAAwAHUArw23DTAAMAAwADAAMAAwADAAMAAwADAAMAB1AL8NMAAw
ADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQDHDTAAYABgAM8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA1w11ANwNMAAwAD0B5A0wADAAMAAwADAAMADsDfQN/A0EDgwOFA4wABsOMAAwADAAMAAwADAAMAAwANIG0gbSBtIG0gbSBtIG0gYjDigOwQUuDsEFMw7SBjoO0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGQg5KDlIOVg7SBtIGXg5lDm0OdQ7SBtIGfQ6EDooOjQ6UDtIGmg6hDtIG0gaoDqwO0ga0DrwO0gZgAGAAYADEDmAAYAAkBtIGzA5gANIOYADaDokO0gbSBt8O5w7SBu8O0gb1DvwO0gZgAGAAxA7SBtIG0gbSBtIGYABgAGAAYAAED2AAsAUMD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHJA8sBywHLAcsBywHLAccDywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywPLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAc0D9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHPA/SBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gYUD0QPlQCVAJUAMAAwADAAMACVAJUAlQCVAJUAlQCVAEwPMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA//8EAAQABAAEAAQABAAEAAQABAANAAMAAQABAAIABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQACgATABcAHgAbABoAHgAXABYAEgAeABsAGAAPABgAHABLAEsASwBLAEsASwBLAEsASwBLABgAGAAeAB4AHgATAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABYAGwASAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWAA0AEQAeAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAFAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJABYAGgAbABsAGwAeAB0AHQAeAE8AFwAeAA0AHgAeABoAGwBPAE8ADgBQAB0AHQAdAE8ATwAXAE8ATwBPABYAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAFAATwBAAE8ATwBPAEAATwBQAFAATwBQAB4AHgAeAB4AHgAeAB0AHQAdAB0AHgAdAB4ADgBQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgBQAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQ
ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAkACQAJAAkACQAJAAkABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAFAAHgAeAB4AKwArAFAAUABQAFAAGABQACsAKwArACsAHgAeAFAAHgBQAFAAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUAAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAYAA0AKwArAB4AHgAbACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAB4ABAAEAB4ABAAEABMABAArACsAKwArACsAKwArACsAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAKwArACsAKwBWAFYAVgBWAB4AHgArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AGgAaABoAGAAYAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQAEwAEACsAEwATAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABLAEsASwBLAEsASwBLAEsASwBLABoAGQAZAB4AUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABMAUAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABABQAFAABAAEAB4ABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUAAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAFAABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQAUABQAB4AHgAYABMAUAArACsABAAbABsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAFAABAAEAAQABAAEAFAABAAEAAQAUAAEAAQABAAEAAQAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArACsAHgArAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAUAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEAA0ADQBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUAArACsAKwBQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABABQACsAKwArACsAKwArACsAKwAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUAAaABoAUABQAFAAUABQAEwAHgAbAFAAHgAEACsAKwAEAAQABAArAFAAUABQAFAAUABQACsAKwArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQACsAUABQACsAKwAEACsABAAEAAQABAAEACsAKwArACsABAAEACsAKwAEAAQABAArACsAKwAEACsAKwArACsAKwArACsAUABQAFAAUAArAFAAKwArACsAKw
ArACsAKwBLAEsASwBLAEsASwBLAEsASwBLAAQABABQAFAAUAAEAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAArACsAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AGwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAKwArACsAKwArAAQABAAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAAQAUAArAFAAUABQAFAAUABQACsAKwArAFAAUABQACsAUABQAFAAUAArACsAKwBQAFAAKwBQACsAUABQACsAKwArAFAAUAArACsAKwBQAFAAUAArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArAAQABAAEAAQABAArACsAKwAEAAQABAArAAQABAAEAAQAKwArAFAAKwArACsAKwArACsABAArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAHgAeAB4AHgAeAB4AGwAeACsAKwArACsAKwAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAUABQAFAAKwArACsAKwArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwAOAFAAUABQAFAAUABQAFAAHgBQAAQABAAEAA4AUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAKwArAAQAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAKwArACsAKwArACsAUAArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAFAABAAEAAQABAAEAAQABAArAAQABAAEACsABAAEAAQABABQAB4AKwArACsAKwBQAFAAUAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQABoAUABQAFAAUABQAFAAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQACsAUAArACsAUABQAFAAUABQAFAAUAArACsAKwAEACsAKwArACsABAAEAAQABAAEAAQAKwAEACsABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArAAQABAAeACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAXAAqACoAKgAqACoAKgAqACsAKwArACsAGwBcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAeAEsASwBLAEsASwBLAEsASwBLAEsADQANACsAKwArACsAKwBcAFwAKwBcACsAXABcAFwAXABcACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAXAArAFwAXABcAFwAXABcAFwAXABcAFwAKgBcAFwAKgAqACoAKgAqACoAKgAqACoAXAArACsAXABcAFwAXABcACsAXAArACoAKgAqACoAKgAqACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwBcAFwAXABcAFAADgAOAA4ADgAeAA4ADgAJAA4ADgANAAkAEwATABMAEwATAAkAHgATAB4AHgAeAAQABAAeAB4AHgAeAB4AHgBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQAFAADQAEAB4ABAAeAAQAFgARABYAEQAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAAQABAAEAAQADQAEAAQAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAA0ADQAeAB4AHgAeAB4AHgAEAB4AHgAeAB4AHgAeACsAHgAeAA4ADgANAA4AHgAeAB4AHgAeAAkACQArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgBcAEsASwBLAEsASwBLAEsASwBLAEsADQANAB4AHgAeAB4AXABcAFwAXABcAFwAKgAqACoAKgBcAFwAXABcACoAKgAqAFwAKgAqACoAXABcACoAKgAqACoAKgAqACoAXABcA
FwAKgAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqAFwAKgBLAEsASwBLAEsASwBLAEsASwBLACoAKgAqACoAKgAqAFAAUABQAFAAUABQACsAUAArACsAKwArACsAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAKwBQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsABAAEAAQAHgANAB4AHgAeAB4AHgAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUAArACsADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWABEAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQANAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAANAA0AKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUAArAAQABAArACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqAA0ADQAVAFwADQAeAA0AGwBcACoAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwAeAB4AEwATAA0ADQAOAB4AEwATAB4ABAAEAAQACQArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAHgArACsAKwATABMASwBLAEsASwBLAEsASwBLAEsASwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAXABcAFwAXABcACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAXAArACsAKwAqACoAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsAHgAeAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKwAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKwArAAQASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACoAKgAqACoAKgAqACoAXAAqACoAKgAqACoAKgArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABABQAFAAUABQAFAAUABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsA
SwANAA0AHgANAA0ADQANAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwAeAB4AHgAeAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArAA0ADQANAA0ADQBLAEsASwBLAEsASwBLAEsASwBLACsAKwArAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUAAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAAQAUABQAFAAUABQAFAABABQAFAABAAEAAQAUAArACsAKwArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQACsAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAFAAUABQACsAHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQACsAKwAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQACsAHgAeAB4AHgAeAB4AHgAOAB4AKwANAA0ADQANAA0ADQANAAkADQANAA0ACAAEAAsABAAEAA0ACQANAA0ADAAdAB0AHgAXABcAFgAXABcAFwAWABcAHQAdAB4AHgAUABQAFAANAAEAAQAEAAQABAAEAAQACQAaABoAGgAaABoAGgAaABoAHgAXABcAHQAVABUAHgAeAB4AHgAeAB4AGAAWABEAFQAVABUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ADQAeAA0ADQANAA0AHgANAA0ADQAHAB4AHgAeAB4AKwAEAAQABAAEAAQABAAEAAQABAAEAFAAUAArACsATwBQAFAAUABQAFAAHgAeAB4AFgARAE8AUABPAE8ATwBPAFAAUABQAFAAUAAeAB4AHgAWABEAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArABsAGwAbABsAGwAbABsAGgAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGgAbABsAGwAbABoAGwAbABoAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAHgAeAFAAGgAeAB0AHgBQAB4AGgAeAB4AHgAeAB4AHgAeAB4AHgBPAB4AUAAbAB4AHgBQAFAAUABQAFAAHgAeAB4AHQAdAB4AUAAeAFAAHgBQAB4AUABPAFAAUAAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgBQAFAAUABQAE8ATwBQAFAAUABQAFAATwBQAFAATwBQAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAUABQAFAATwBPAE8ATwBPAE8ATwBPAE8ATwBQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABPAB4AHgArACsAKwArAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHQAdAB4AHgAeAB0AHQAeAB4AHQAeAB4AHgAdAB4AHQAbABsAHgAdAB4AHgAeAB4AHQAeAB4AHQAdAB0AHQAeAB4AHQAeAB0AHgAdAB0AHQAdAB0AHQAeAB0AHgAeAB4AHgAeAB0AHQAdAB0AHgAeAB4AHgAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHgAeAB0AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAeAB0AHQAdAB0AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAdAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAWABEAHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAWABEAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAE8ATwBPAE8ATwBPAE8ATwB
PAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AHQAdAB0AHgAeAB0AHgAeAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlAB4AHQAdAB4AHgAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AJQAlAB0AHQAlAB4AJQAlACUAIAAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAdAB0AHQAeAB0AJQAdAB0AHgAdAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAdAB0AHQAdACUAHgAlACUAJQAdACUAJQAdAB0AHQAlACUAHQAdACUAHQAdACUAJQAlAB4AHQAeAB4AHgAeAB0AHQAlAB0AHQAdAB0AHQAdACUAJQAlACUAJQAdACUAJQAgACUAHQAdACUAJQAlACUAJQAlACUAJQAeAB4AHgAlACUAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AFwAXABcAFwAXABcAHgATABMAJQAeAB4AHgAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARABYAEQAWABEAFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAEAAQABAAeAB4AKwArACsAKwArABMADQANAA0AUAATAA0AUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUAANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAA0ADQANAA0ADQANAA0ADQAeAA0AFgANAB4AHgAXABcAHgAeABcAFwAWABEAFgARABYAEQAWABEADQANAA0ADQATAFAADQANAB4ADQANAB4AHgAeAB4AHgAMAAwADQANAA0AHgANAA0AFgANAA0ADQANAA0ADQANAA0AHgANAB4ADQANAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArAA0AEQARACUAJQBHAFcAVwAWABEAFgARABYAEQAWABEAFgARACUAJQAWABEAFgARABYAEQAWABEAFQAWABEAEQAlAFcAVwBXAFcAVwBXAFcAVwBXAAQABAAEAAQABAAEACUAVwBXAFcAVwA2ACUAJQBXAFcAVwBHAEcAJQAlACUAKwBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBRAFcAUQBXAFEAVwBXAFcAVwBXAFcAUQBXAFcAVwBXAFcAVwBRAFEAKwArAAQABAAVABUARwBHAFcAFQBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAF
cAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBRAFcAVwBXAFcAVwBXAFEAUQBXAFcAVwBXABUAUQBHAEcAVwArACsAKwArACsAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwAlACUAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACsAKwArACsAKwArACsAKwArACsAKwArAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBPAE8ATwBPAE8ATwBPAE8AJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAEcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADQATAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABLAEsASwBLAEsASwBLAEsASwBLAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAABAAEAAQABAAeAAQABAAEAAQABAAEAAQABAAEAAQAHgBQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAeAA0ADQANAA0ADQArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAAQAUABQAFAABABQAFAAUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAeAB4AHgAeAAQAKwArACsAUABQAFAAUABQAFAAHgAeABoAHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADgAOABMAEwArACsAKwArACsAKwArACsABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwANAA0ASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUAAeAB4AHgBQAA4AUABQAAQAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArAB4AWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYACsAKwArAAQAHgAeAB4AHgAeAB4ADQANAA0AHgAeAB4AHgArAFAASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArAB4AHgBcAFwAXABcAFwAKgBcAFwAXABcAFwAXABcAFwAXABcAEsASwBLAEsASwBLAEsASwBLAEsAXABcAFwAXABcACsAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAFAAUABQAAQAUABQAFAAUABQAFAAUABQAAQABAArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAHgANAA0ADQBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAXAAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAKgAqACoAXABcACoAKgBcAFwAXABcAFwAKgAqAFwAKgBcACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcACoAKgBQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAA0ADQBQAFAAUAAEAAQAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQADQAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAVABVAFUAVQBVAFUAVQBVAFUAVQBVAFUAV
QBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBUAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVACsAKwArACsAKwArACsAKwArACsAKwArAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAKwArACsAKwBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAKwArACsAKwAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAKwArACsAKwArAFYABABWAFYAVgBWAFYAVgBWAFYAVgBWAB4AVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgArAFYAVgBWAFYAVgArAFYAKwBWAFYAKwBWAFYAKwBWAFYAVgBWAFYAVgBWAFYAVgBWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAEQAWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAaAB4AKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAGAARABEAGAAYABMAEwAWABEAFAArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACUAJQAlACUAJQAWABEAFgARABYAEQAWABEAFgARABYAEQAlACUAFgARACUAJQAlACUAJQAlACUAEQAlABEAKwAVABUAEwATACUAFgARABYAEQAWABEAJQAlACUAJQAlACUAJQAlACsAJQAbABoAJQArACsAKwArAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAcAKwATACUAJQAbABoAJQAlABYAEQAlACUAEQAlABEAJQBXAFcAVwBXAFcAVwBXAFcAVwBXABUAFQAlACUAJQATACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXABYAJQARACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAWACUAEQAlABYAEQARABYAEQARABUAVwBRAFEAUQBRAFEAUQBRAFEAUQBRAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAEcARwArACsAVwBXAFcAVwBXAFcAKwArAFcAVwBXAFcAVwBXACsAKwBXAFcAVwBXAFcAVwArACsAVwBXAFcAKwArACsAGgAbACUAJQAlABsAGwArAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAAQAB0AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsADQANAA0AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAA0AUABQAFAAUAArACsAKwArAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAr
ACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwArAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwBQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAUABQAFAAUABQAAQABAAEACsABAAEACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAKwBQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAA0ADQANAA0ADQANAA0ADQAeACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAArACsAKwArAFAAUABQAFAAUAANAA0ADQANAA0ADQAUACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsADQANAA0ADQANAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArAAQABAANACsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAB4AHgAeAB4AHgArACsAKwArACsAKwAEAAQABAAEAAQABAAEAA0ADQAeAB4AHgAeAB4AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsASwBLAEsASwBLAEsASwBLAEsASwANAA0ADQANAFAABAAEAFAAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAeAA4AUAArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAADQANAB4ADQAEAAQABAAEAB4ABAAEAEsASwBLAEsASwBLAEsASwBLAEsAUAAOAFAADQANAA0AKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAANAA0AHgANAA0AHgAEACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAA0AKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsABAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsABAAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAUAArACsAKwArACsAKwAEACsAKwArACsAKwBQAFA
AUABQAFAABAAEACsAKwAEAAQABAAEAAQABAAEACsAKwArAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAAQABABQAFAAUABQAA0ADQANAA0AHgBLAEsASwBLAEsASwBLAEsASwBLAA0ADQArAB4ABABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUAAeAFAAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABAAEAAQADgANAA0AEwATAB4AHgAeAA0ADQANAA0ADQANAA0ADQANAA0ADQANAA0ADQANAFAAUABQAFAABAAEACsAKwAEAA0ADQAeAFAAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKwArACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBcAFwADQANAA0AKgBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAKwArAFAAKwArAFAAUABQAFAAUABQAFAAUAArAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQAKwAEAAQAKwArAAQABAAEAAQAUAAEAFAABAAEAA0ADQANACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABABQAA4AUAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAFAABAAEAAQABAAOAB4ADQANAA0ADQAOAB4ABAArACsAKwArACsAKwArACsAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAA0ADQANAFAADgAOAA4ADQANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAAQABAAEAFAADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAOABMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAArACsAKwAEACsABAAEACsABAAEAAQABAAEAAQABABQAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAaABoAGgAaAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAASABIAEgAQwBDAEMAUABQAFAAUABDAFAAUABQAEgAQwBIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUA
BQAFAAUABQAFAASABDAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAJAAkACQAJAAkACQAJABYAEQArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwANAA0AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAANACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAA0ADQANAB4AHgAeAB4AHgAeAFAAUABQAFAADQAeACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAA0AHgAeACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAARwBHABUARwAJACsAKwArACsAKwArACsAKwArACsAKwAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUQBRAFEAKwArACsAKwArACsAKwArACsAKwArACsAKwBRAFEAUQBRACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAHgAEAAQADQAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQABAAEAAQABAAeAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQAHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAKwArAFAAKwArAFAAUAArACsAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUAArAFAAUABQAFAAUABQAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAHgAeAFAAUABQAFAAUAArAFAAKwArACsAUABQAFAAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeACsAKwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4ABAAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAHgAeAA0ADQANAA0AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArA
CsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArAAQABAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwBQAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArABsAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAB4AHgAeAB4ABAAEAAQABAAEAAQABABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArABYAFgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAGgBQAFAAUAAaAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUAArACsAKwArACsAKwBQACsAKwArACsAUAArAFAAKwBQACsAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUAArAFAAKwBQACsAUAArAFAAUAArAFAAKwArAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAKwBQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8AJQAlACUAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB4AHgAeACUAJQAlAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAlACUAJQAlACUAHgAlACUAJQAlACUAIAAgACAAJQAlACAAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACEAIQAhACEAIQAlACUAIAAgACUAJQAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAIAAlACUAJQAlACAAIAAgACUAIAAgACAAJQAlACUAJQAlACUAJQAgACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAlAB4AJQAeACUAJQAlACUAJQAgACUAJQAlACUAHgAlAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACAAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABcAFwAXABUAFQAVAB4AHgAeAB4AJQAlACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAgACUAJQAgACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUA
IAAlACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAIAAgACUAJQAgACAAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACAAIAAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAgACAAIAAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAA=='; - - var LETTER_NUMBER_MODIFIER = 50; - // Non-tailorable Line Breaking Classes - var BK = 1; // Cause a line break (after) - var CR$1 = 2; // Cause a line break (after), except between CR and LF - var LF$1 = 3; // Cause a line break (after) - var CM = 4; // Prohibit a line break between the character and the preceding character - var NL = 5; // Cause a line break (after) - var WJ = 7; // Prohibit line breaks before and after - var ZW = 8; // Provide a break opportunity - var GL = 9; // Prohibit line breaks before and after - var SP = 10; // Enable indirect line breaks - var ZWJ$1 = 11; // Prohibit line breaks within joiner sequences - // Break Opportunities - var B2 = 12; // Provide a line break opportunity before and after the character - var BA = 13; // Generally provide a line break opportunity after the character - var BB = 14; // Generally provide a line break opportunity before the character - var HY = 15; // Provide a line break opportunity after the character, except in numeric context - var CB = 16; // Provide a line break opportunity contingent on additional information - // Characters Prohibiting Certain Breaks - var CL = 17; // Prohibit line breaks before - var CP = 18; // Prohibit line breaks before - var EX = 19; // Prohibit line breaks before - var IN = 20; // Allow only indirect line breaks between pairs - var NS = 21; // Allow only indirect line breaks before - var OP = 22; // Prohibit line breaks after - var QU = 23; // Act like they are both opening and closing - // Numeric Context - var IS = 24; // Prevent breaks after any and before numeric - var NU = 25; // Form numeric expressions for line breaking purposes - var PO = 26; // Do not break following a numeric expression - var PR = 27; // Do not break in front of a numeric expression - var SY = 28; // Prevent a break before; and allow a break after - // Other Characters - var AI = 29; // Act like AL when the resolvedEAW is N; otherwise; act as ID - var AL = 30; // Are alphabetic characters or symbols that are used with alphabetic characters - var CJ = 31; // Treat as NS or ID for strict or normal breaking. 
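- // CJ covers small kana and similar conditional starters; codePointsToCharacterClasses below resolves it
- // to NS when lineBreak is 'strict' and to ID otherwise. The base64 trie above maps each code point to one
- // of these classes (offset by LETTER_NUMBER_MODIFIER for letter/number categories), and they drive the
- // LineBreaker iterator defined later in this file. Illustrative use:
- //   LineBreaker('some text', { lineBreak: 'strict', wordBreak: 'normal' }).next() // -> { value: Break, done: false }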
- var EB = 32; // Do not break from following Emoji Modifier - var EM = 33; // Do not break from preceding Emoji Base - var H2 = 34; // Form Korean syllable blocks - var H3 = 35; // Form Korean syllable blocks - var HL = 36; // Do not break around a following hyphen; otherwise act as Alphabetic - var ID = 37; // Break before or after; except in some numeric context - var JL = 38; // Form Korean syllable blocks - var JV = 39; // Form Korean syllable blocks - var JT = 40; // Form Korean syllable blocks - var RI$1 = 41; // Keep pairs together. For pairs; break before and after other classes - var SA = 42; // Provide a line break opportunity contingent on additional, language-specific context analysis - var XX = 43; // Have as yet unknown line breaking behavior or unassigned code positions - var ea_OP = [0x2329, 0xff08]; - var BREAK_MANDATORY = '!'; - var BREAK_NOT_ALLOWED$1 = '×'; - var BREAK_ALLOWED$1 = '÷'; - var UnicodeTrie$1 = createTrieFromBase64$1(base64$1); - var ALPHABETICS = [AL, HL]; - var HARD_LINE_BREAKS = [BK, CR$1, LF$1, NL]; - var SPACE$1 = [SP, ZW]; - var PREFIX_POSTFIX = [PR, PO]; - var LINE_BREAKS = HARD_LINE_BREAKS.concat(SPACE$1); - var KOREAN_SYLLABLE_BLOCK = [JL, JV, JT, H2, H3]; - var HYPHEN = [HY, BA]; - var codePointsToCharacterClasses = function (codePoints, lineBreak) { - if (lineBreak === void 0) { lineBreak = 'strict'; } - var types = []; - var indices = []; - var categories = []; - codePoints.forEach(function (codePoint, index) { - var classType = UnicodeTrie$1.get(codePoint); - if (classType > LETTER_NUMBER_MODIFIER) { - categories.push(true); - classType -= LETTER_NUMBER_MODIFIER; - } - else { - categories.push(false); - } - if (['normal', 'auto', 'loose'].indexOf(lineBreak) !== -1) { - // U+2010, – U+2013, 〜 U+301C, ゠ U+30A0 - if ([0x2010, 0x2013, 0x301c, 0x30a0].indexOf(codePoint) !== -1) { - indices.push(index); - return types.push(CB); - } - } - if (classType === CM || classType === ZWJ$1) { - // LB10 Treat any remaining combining mark or ZWJ as AL. - if (index === 0) { - indices.push(index); - return types.push(AL); - } - // LB9 Do not break a combining character sequence; treat it as if it has the line breaking class of - // the base character in all of the following rules. Treat ZWJ as if it were CM. - var prev = types[index - 1]; - if (LINE_BREAKS.indexOf(prev) === -1) { - indices.push(indices[index - 1]); - return types.push(prev); - } - indices.push(index); - return types.push(AL); - } - indices.push(index); - if (classType === CJ) { - return types.push(lineBreak === 'strict' ? NS : ID); - } - if (classType === SA) { - return types.push(AL); - } - if (classType === AI) { - return types.push(AL); - } - // For supplementary characters, a useful default is to treat characters in the range 10000..1FFFD as AL - // and characters in the ranges 20000..2FFFD and 30000..3FFFD as ID, until the implementation can be revised - // to take into account the actual line breaking properties for these characters. - if (classType === XX) { - if ((codePoint >= 0x20000 && codePoint <= 0x2fffd) || (codePoint >= 0x30000 && codePoint <= 0x3fffd)) { - return types.push(ID); - } - else { - return types.push(AL); - } - } - types.push(classType); - }); - return [indices, types, categories]; - }; - var isAdjacentWithSpaceIgnored = function (a, b, currentIndex, classTypes) { - var current = classTypes[currentIndex]; - if (Array.isArray(a) ? 
a.indexOf(current) !== -1 : a === current) { - var i = currentIndex; - while (i <= classTypes.length) { - i++; - var next = classTypes[i]; - if (next === b) { - return true; - } - if (next !== SP) { - break; - } - } - } - if (current === SP) { - var i = currentIndex; - while (i > 0) { - i--; - var prev = classTypes[i]; - if (Array.isArray(a) ? a.indexOf(prev) !== -1 : a === prev) { - var n = currentIndex; - while (n <= classTypes.length) { - n++; - var next = classTypes[n]; - if (next === b) { - return true; - } - if (next !== SP) { - break; - } - } - } - if (prev !== SP) { - break; - } - } - } - return false; - }; - var previousNonSpaceClassType = function (currentIndex, classTypes) { - var i = currentIndex; - while (i >= 0) { - var type = classTypes[i]; - if (type === SP) { - i--; - } - else { - return type; - } - } - return 0; - }; - var _lineBreakAtIndex = function (codePoints, classTypes, indicies, index, forbiddenBreaks) { - if (indicies[index] === 0) { - return BREAK_NOT_ALLOWED$1; - } - var currentIndex = index - 1; - if (Array.isArray(forbiddenBreaks) && forbiddenBreaks[currentIndex] === true) { - return BREAK_NOT_ALLOWED$1; - } - var beforeIndex = currentIndex - 1; - var afterIndex = currentIndex + 1; - var current = classTypes[currentIndex]; - // LB4 Always break after hard line breaks. - // LB5 Treat CR followed by LF, as well as CR, LF, and NL as hard line breaks. - var before = beforeIndex >= 0 ? classTypes[beforeIndex] : 0; - var next = classTypes[afterIndex]; - if (current === CR$1 && next === LF$1) { - return BREAK_NOT_ALLOWED$1; - } - if (HARD_LINE_BREAKS.indexOf(current) !== -1) { - return BREAK_MANDATORY; - } - // LB6 Do not break before hard line breaks. - if (HARD_LINE_BREAKS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB7 Do not break before spaces or zero width space. - if (SPACE$1.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB8 Break before any character following a zero-width space, even if one or more spaces intervene. - if (previousNonSpaceClassType(currentIndex, classTypes) === ZW) { - return BREAK_ALLOWED$1; - } - // LB8a Do not break after a zero width joiner. - if (UnicodeTrie$1.get(codePoints[currentIndex]) === ZWJ$1) { - return BREAK_NOT_ALLOWED$1; - } - // zwj emojis - if ((current === EB || current === EM) && UnicodeTrie$1.get(codePoints[afterIndex]) === ZWJ$1) { - return BREAK_NOT_ALLOWED$1; - } - // LB11 Do not break before or after Word joiner and related characters. - if (current === WJ || next === WJ) { - return BREAK_NOT_ALLOWED$1; - } - // LB12 Do not break after NBSP and related characters. - if (current === GL) { - return BREAK_NOT_ALLOWED$1; - } - // LB12a Do not break before NBSP and related characters, except after spaces and hyphens. - if ([SP, BA, HY].indexOf(current) === -1 && next === GL) { - return BREAK_NOT_ALLOWED$1; - } - // LB13 Do not break before ‘]’ or ‘!’ or ‘;’ or ‘/’, even after spaces. - if ([CL, CP, EX, IS, SY].indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB14 Do not break after ‘[’, even after spaces. - if (previousNonSpaceClassType(currentIndex, classTypes) === OP) { - return BREAK_NOT_ALLOWED$1; - } - // LB15 Do not break within ‘”[’, even with intervening spaces. - if (isAdjacentWithSpaceIgnored(QU, OP, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB16 Do not break between closing punctuation and a nonstarter (lb=NS), even with intervening spaces. 
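- // (isAdjacentWithSpaceIgnored skips over any run of SP characters on either side before comparing classes,
- // which is how the "even with intervening spaces" part of LB15-LB17 is implemented.)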
- if (isAdjacentWithSpaceIgnored([CL, CP], NS, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB17 Do not break within ‘——’, even with intervening spaces. - if (isAdjacentWithSpaceIgnored(B2, B2, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB18 Break after spaces. - if (current === SP) { - return BREAK_ALLOWED$1; - } - // LB19 Do not break before or after quotation marks, such as ‘ ” ’. - if (current === QU || next === QU) { - return BREAK_NOT_ALLOWED$1; - } - // LB20 Break before and after unresolved CB. - if (next === CB || current === CB) { - return BREAK_ALLOWED$1; - } - // LB21 Do not break before hyphen-minus, other hyphens, fixed-width spaces, small kana, and other non-starters, or after acute accents. - if ([BA, HY, NS].indexOf(next) !== -1 || current === BB) { - return BREAK_NOT_ALLOWED$1; - } - // LB21a Don't break after Hebrew + Hyphen. - if (before === HL && HYPHEN.indexOf(current) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB21b Don’t break between Solidus and Hebrew letters. - if (current === SY && next === HL) { - return BREAK_NOT_ALLOWED$1; - } - // LB22 Do not break before ellipsis. - if (next === IN) { - return BREAK_NOT_ALLOWED$1; - } - // LB23 Do not break between digits and letters. - if ((ALPHABETICS.indexOf(next) !== -1 && current === NU) || (ALPHABETICS.indexOf(current) !== -1 && next === NU)) { - return BREAK_NOT_ALLOWED$1; - } - // LB23a Do not break between numeric prefixes and ideographs, or between ideographs and numeric postfixes. - if ((current === PR && [ID, EB, EM].indexOf(next) !== -1) || - ([ID, EB, EM].indexOf(current) !== -1 && next === PO)) { - return BREAK_NOT_ALLOWED$1; - } - // LB24 Do not break between numeric prefix/postfix and letters, or between letters and prefix/postfix. - if ((ALPHABETICS.indexOf(current) !== -1 && PREFIX_POSTFIX.indexOf(next) !== -1) || - (PREFIX_POSTFIX.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1)) { - return BREAK_NOT_ALLOWED$1; - } - // LB25 Do not break between the following pairs of classes relevant to numbers: - if ( - // (PR | PO) × ( OP | HY )? NU - ([PR, PO].indexOf(current) !== -1 && - (next === NU || ([OP, HY].indexOf(next) !== -1 && classTypes[afterIndex + 1] === NU))) || - // ( OP | HY ) × NU - ([OP, HY].indexOf(current) !== -1 && next === NU) || - // NU × (NU | SY | IS) - (current === NU && [NU, SY, IS].indexOf(next) !== -1)) { - return BREAK_NOT_ALLOWED$1; - } - // NU (NU | SY | IS)* × (NU | SY | IS | CL | CP) - if ([NU, SY, IS, CL, CP].indexOf(next) !== -1) { - var prevIndex = currentIndex; - while (prevIndex >= 0) { - var type = classTypes[prevIndex]; - if (type === NU) { - return BREAK_NOT_ALLOWED$1; - } - else if ([SY, IS].indexOf(type) !== -1) { - prevIndex--; - } - else { - break; - } - } - } - // NU (NU | SY | IS)* (CL | CP)? × (PO | PR)) - if ([PR, PO].indexOf(next) !== -1) { - var prevIndex = [CL, CP].indexOf(current) !== -1 ? beforeIndex : currentIndex; - while (prevIndex >= 0) { - var type = classTypes[prevIndex]; - if (type === NU) { - return BREAK_NOT_ALLOWED$1; - } - else if ([SY, IS].indexOf(type) !== -1) { - prevIndex--; - } - else { - break; - } - } - } - // LB26 Do not break a Korean syllable. - if ((JL === current && [JL, JV, H2, H3].indexOf(next) !== -1) || - ([JV, H2].indexOf(current) !== -1 && [JV, JT].indexOf(next) !== -1) || - ([JT, H3].indexOf(current) !== -1 && next === JT)) { - return BREAK_NOT_ALLOWED$1; - } - // LB27 Treat a Korean Syllable Block the same as ID. 
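- // (i.e. no break between a Korean syllable block and a following IN or PO, nor between a preceding PR and the block.)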
- if ((KOREAN_SYLLABLE_BLOCK.indexOf(current) !== -1 && [IN, PO].indexOf(next) !== -1) || - (KOREAN_SYLLABLE_BLOCK.indexOf(next) !== -1 && current === PR)) { - return BREAK_NOT_ALLOWED$1; - } - // LB28 Do not break between alphabetics (“at”). - if (ALPHABETICS.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB29 Do not break between numeric punctuation and alphabetics (“e.g.”). - if (current === IS && ALPHABETICS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB30 Do not break between letters, numbers, or ordinary symbols and opening or closing parentheses. - if ((ALPHABETICS.concat(NU).indexOf(current) !== -1 && - next === OP && - ea_OP.indexOf(codePoints[afterIndex]) === -1) || - (ALPHABETICS.concat(NU).indexOf(next) !== -1 && current === CP)) { - return BREAK_NOT_ALLOWED$1; - } - // LB30a Break between two regional indicator symbols if and only if there are an even number of regional - // indicators preceding the position of the break. - if (current === RI$1 && next === RI$1) { - var i = indicies[currentIndex]; - var count = 1; - while (i > 0) { - i--; - if (classTypes[i] === RI$1) { - count++; - } - else { - break; - } - } - if (count % 2 !== 0) { - return BREAK_NOT_ALLOWED$1; - } - } - // LB30b Do not break between an emoji base and an emoji modifier. - if (current === EB && next === EM) { - return BREAK_NOT_ALLOWED$1; - } - return BREAK_ALLOWED$1; - }; - var cssFormattedClasses = function (codePoints, options) { - if (!options) { - options = { lineBreak: 'normal', wordBreak: 'normal' }; - } - var _a = codePointsToCharacterClasses(codePoints, options.lineBreak), indicies = _a[0], classTypes = _a[1], isLetterNumber = _a[2]; - if (options.wordBreak === 'break-all' || options.wordBreak === 'break-word') { - classTypes = classTypes.map(function (type) { return ([NU, AL, SA].indexOf(type) !== -1 ? ID : type); }); - } - var forbiddenBreakpoints = options.wordBreak === 'keep-all' - ? 
isLetterNumber.map(function (letterNumber, i) { - return letterNumber && codePoints[i] >= 0x4e00 && codePoints[i] <= 0x9fff; - }) - : undefined; - return [indicies, classTypes, forbiddenBreakpoints]; - }; - var Break = /** @class */ (function () { - function Break(codePoints, lineBreak, start, end) { - this.codePoints = codePoints; - this.required = lineBreak === BREAK_MANDATORY; - this.start = start; - this.end = end; - } - Break.prototype.slice = function () { - return fromCodePoint$1.apply(void 0, this.codePoints.slice(this.start, this.end)); - }; - return Break; - }()); - var LineBreaker = function (str, options) { - var codePoints = toCodePoints$1(str); - var _a = cssFormattedClasses(codePoints, options), indicies = _a[0], classTypes = _a[1], forbiddenBreakpoints = _a[2]; - var length = codePoints.length; - var lastEnd = 0; - var nextIndex = 0; - return { - next: function () { - if (nextIndex >= length) { - return { done: true, value: null }; - } - var lineBreak = BREAK_NOT_ALLOWED$1; - while (nextIndex < length && - (lineBreak = _lineBreakAtIndex(codePoints, classTypes, indicies, ++nextIndex, forbiddenBreakpoints)) === - BREAK_NOT_ALLOWED$1) { } - if (lineBreak !== BREAK_NOT_ALLOWED$1 || nextIndex === length) { - var value = new Break(codePoints, lineBreak, lastEnd, nextIndex); - lastEnd = nextIndex; - return { value: value, done: false }; - } - return { done: true, value: null }; - }, - }; - }; - - // https://www.w3.org/TR/css-syntax-3 - var FLAG_UNRESTRICTED = 1 << 0; - var FLAG_ID = 1 << 1; - var FLAG_INTEGER = 1 << 2; - var FLAG_NUMBER = 1 << 3; - var LINE_FEED = 0x000a; - var SOLIDUS = 0x002f; - var REVERSE_SOLIDUS = 0x005c; - var CHARACTER_TABULATION = 0x0009; - var SPACE = 0x0020; - var QUOTATION_MARK = 0x0022; - var EQUALS_SIGN = 0x003d; - var NUMBER_SIGN = 0x0023; - var DOLLAR_SIGN = 0x0024; - var PERCENTAGE_SIGN = 0x0025; - var APOSTROPHE = 0x0027; - var LEFT_PARENTHESIS = 0x0028; - var RIGHT_PARENTHESIS = 0x0029; - var LOW_LINE = 0x005f; - var HYPHEN_MINUS = 0x002d; - var EXCLAMATION_MARK = 0x0021; - var LESS_THAN_SIGN = 0x003c; - var GREATER_THAN_SIGN = 0x003e; - var COMMERCIAL_AT = 0x0040; - var LEFT_SQUARE_BRACKET = 0x005b; - var RIGHT_SQUARE_BRACKET = 0x005d; - var CIRCUMFLEX_ACCENT = 0x003d; - var LEFT_CURLY_BRACKET = 0x007b; - var QUESTION_MARK = 0x003f; - var RIGHT_CURLY_BRACKET = 0x007d; - var VERTICAL_LINE = 0x007c; - var TILDE = 0x007e; - var CONTROL = 0x0080; - var REPLACEMENT_CHARACTER = 0xfffd; - var ASTERISK = 0x002a; - var PLUS_SIGN = 0x002b; - var COMMA = 0x002c; - var COLON = 0x003a; - var SEMICOLON = 0x003b; - var FULL_STOP = 0x002e; - var NULL = 0x0000; - var BACKSPACE = 0x0008; - var LINE_TABULATION = 0x000b; - var SHIFT_OUT = 0x000e; - var INFORMATION_SEPARATOR_ONE = 0x001f; - var DELETE = 0x007f; - var EOF = -1; - var ZERO = 0x0030; - var a = 0x0061; - var e = 0x0065; - var f = 0x0066; - var u = 0x0075; - var z = 0x007a; - var A = 0x0041; - var E = 0x0045; - var F = 0x0046; - var U = 0x0055; - var Z = 0x005a; - var isDigit = function (codePoint) { return codePoint >= ZERO && codePoint <= 0x0039; }; - var isSurrogateCodePoint = function (codePoint) { return codePoint >= 0xd800 && codePoint <= 0xdfff; }; - var isHex = function (codePoint) { - return isDigit(codePoint) || (codePoint >= A && codePoint <= F) || (codePoint >= a && codePoint <= f); - }; - var isLowerCaseLetter = function (codePoint) { return codePoint >= a && codePoint <= z; }; - var isUpperCaseLetter = function (codePoint) { return codePoint >= A && codePoint <= Z; }; - var isLetter = 
function (codePoint) { return isLowerCaseLetter(codePoint) || isUpperCaseLetter(codePoint); }; - var isNonASCIICodePoint = function (codePoint) { return codePoint >= CONTROL; }; - var isWhiteSpace = function (codePoint) { - return codePoint === LINE_FEED || codePoint === CHARACTER_TABULATION || codePoint === SPACE; - }; - var isNameStartCodePoint = function (codePoint) { - return isLetter(codePoint) || isNonASCIICodePoint(codePoint) || codePoint === LOW_LINE; - }; - var isNameCodePoint = function (codePoint) { - return isNameStartCodePoint(codePoint) || isDigit(codePoint) || codePoint === HYPHEN_MINUS; - }; - var isNonPrintableCodePoint = function (codePoint) { - return ((codePoint >= NULL && codePoint <= BACKSPACE) || - codePoint === LINE_TABULATION || - (codePoint >= SHIFT_OUT && codePoint <= INFORMATION_SEPARATOR_ONE) || - codePoint === DELETE); - }; - var isValidEscape = function (c1, c2) { - if (c1 !== REVERSE_SOLIDUS) { - return false; - } - return c2 !== LINE_FEED; - }; - var isIdentifierStart = function (c1, c2, c3) { - if (c1 === HYPHEN_MINUS) { - return isNameStartCodePoint(c2) || isValidEscape(c2, c3); - } - else if (isNameStartCodePoint(c1)) { - return true; - } - else if (c1 === REVERSE_SOLIDUS && isValidEscape(c1, c2)) { - return true; - } - return false; - }; - var isNumberStart = function (c1, c2, c3) { - if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) { - if (isDigit(c2)) { - return true; - } - return c2 === FULL_STOP && isDigit(c3); - } - if (c1 === FULL_STOP) { - return isDigit(c2); - } - return isDigit(c1); - }; - var stringToNumber = function (codePoints) { - var c = 0; - var sign = 1; - if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) { - if (codePoints[c] === HYPHEN_MINUS) { - sign = -1; - } - c++; - } - var integers = []; - while (isDigit(codePoints[c])) { - integers.push(codePoints[c++]); - } - var int = integers.length ? parseInt(fromCodePoint$1.apply(void 0, integers), 10) : 0; - if (codePoints[c] === FULL_STOP) { - c++; - } - var fraction = []; - while (isDigit(codePoints[c])) { - fraction.push(codePoints[c++]); - } - var fracd = fraction.length; - var frac = fracd ? parseInt(fromCodePoint$1.apply(void 0, fraction), 10) : 0; - if (codePoints[c] === E || codePoints[c] === e) { - c++; - } - var expsign = 1; - if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) { - if (codePoints[c] === HYPHEN_MINUS) { - expsign = -1; - } - c++; - } - var exponent = []; - while (isDigit(codePoints[c])) { - exponent.push(codePoints[c++]); - } - var exp = exponent.length ? 
parseInt(fromCodePoint$1.apply(void 0, exponent), 10) : 0; - return sign * (int + frac * Math.pow(10, -fracd)) * Math.pow(10, expsign * exp); - }; - var LEFT_PARENTHESIS_TOKEN = { - type: 2 /* LEFT_PARENTHESIS_TOKEN */ - }; - var RIGHT_PARENTHESIS_TOKEN = { - type: 3 /* RIGHT_PARENTHESIS_TOKEN */ - }; - var COMMA_TOKEN = { type: 4 /* COMMA_TOKEN */ }; - var SUFFIX_MATCH_TOKEN = { type: 13 /* SUFFIX_MATCH_TOKEN */ }; - var PREFIX_MATCH_TOKEN = { type: 8 /* PREFIX_MATCH_TOKEN */ }; - var COLUMN_TOKEN = { type: 21 /* COLUMN_TOKEN */ }; - var DASH_MATCH_TOKEN = { type: 9 /* DASH_MATCH_TOKEN */ }; - var INCLUDE_MATCH_TOKEN = { type: 10 /* INCLUDE_MATCH_TOKEN */ }; - var LEFT_CURLY_BRACKET_TOKEN = { - type: 11 /* LEFT_CURLY_BRACKET_TOKEN */ - }; - var RIGHT_CURLY_BRACKET_TOKEN = { - type: 12 /* RIGHT_CURLY_BRACKET_TOKEN */ - }; - var SUBSTRING_MATCH_TOKEN = { type: 14 /* SUBSTRING_MATCH_TOKEN */ }; - var BAD_URL_TOKEN = { type: 23 /* BAD_URL_TOKEN */ }; - var BAD_STRING_TOKEN = { type: 1 /* BAD_STRING_TOKEN */ }; - var CDO_TOKEN = { type: 25 /* CDO_TOKEN */ }; - var CDC_TOKEN = { type: 24 /* CDC_TOKEN */ }; - var COLON_TOKEN = { type: 26 /* COLON_TOKEN */ }; - var SEMICOLON_TOKEN = { type: 27 /* SEMICOLON_TOKEN */ }; - var LEFT_SQUARE_BRACKET_TOKEN = { - type: 28 /* LEFT_SQUARE_BRACKET_TOKEN */ - }; - var RIGHT_SQUARE_BRACKET_TOKEN = { - type: 29 /* RIGHT_SQUARE_BRACKET_TOKEN */ - }; - var WHITESPACE_TOKEN = { type: 31 /* WHITESPACE_TOKEN */ }; - var EOF_TOKEN = { type: 32 /* EOF_TOKEN */ }; - var Tokenizer = /** @class */ (function () { - function Tokenizer() { - this._value = []; - } - Tokenizer.prototype.write = function (chunk) { - this._value = this._value.concat(toCodePoints$1(chunk)); - }; - Tokenizer.prototype.read = function () { - var tokens = []; - var token = this.consumeToken(); - while (token !== EOF_TOKEN) { - tokens.push(token); - token = this.consumeToken(); - } - return tokens; - }; - Tokenizer.prototype.consumeToken = function () { - var codePoint = this.consumeCodePoint(); - switch (codePoint) { - case QUOTATION_MARK: - return this.consumeStringToken(QUOTATION_MARK); - case NUMBER_SIGN: - var c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if (isNameCodePoint(c1) || isValidEscape(c2, c3)) { - var flags = isIdentifierStart(c1, c2, c3) ? 
FLAG_ID : FLAG_UNRESTRICTED; - var value = this.consumeName(); - return { type: 5 /* HASH_TOKEN */, value: value, flags: flags }; - } - break; - case DOLLAR_SIGN: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return SUFFIX_MATCH_TOKEN; - } - break; - case APOSTROPHE: - return this.consumeStringToken(APOSTROPHE); - case LEFT_PARENTHESIS: - return LEFT_PARENTHESIS_TOKEN; - case RIGHT_PARENTHESIS: - return RIGHT_PARENTHESIS_TOKEN; - case ASTERISK: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return SUBSTRING_MATCH_TOKEN; - } - break; - case PLUS_SIGN: - if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - break; - case COMMA: - return COMMA_TOKEN; - case HYPHEN_MINUS: - var e1 = codePoint; - var e2 = this.peekCodePoint(0); - var e3 = this.peekCodePoint(1); - if (isNumberStart(e1, e2, e3)) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - if (isIdentifierStart(e1, e2, e3)) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - if (e2 === HYPHEN_MINUS && e3 === GREATER_THAN_SIGN) { - this.consumeCodePoint(); - this.consumeCodePoint(); - return CDC_TOKEN; - } - break; - case FULL_STOP: - if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - break; - case SOLIDUS: - if (this.peekCodePoint(0) === ASTERISK) { - this.consumeCodePoint(); - while (true) { - var c = this.consumeCodePoint(); - if (c === ASTERISK) { - c = this.consumeCodePoint(); - if (c === SOLIDUS) { - return this.consumeToken(); - } - } - if (c === EOF) { - return this.consumeToken(); - } - } - } - break; - case COLON: - return COLON_TOKEN; - case SEMICOLON: - return SEMICOLON_TOKEN; - case LESS_THAN_SIGN: - if (this.peekCodePoint(0) === EXCLAMATION_MARK && - this.peekCodePoint(1) === HYPHEN_MINUS && - this.peekCodePoint(2) === HYPHEN_MINUS) { - this.consumeCodePoint(); - this.consumeCodePoint(); - return CDO_TOKEN; - } - break; - case COMMERCIAL_AT: - var a1 = this.peekCodePoint(0); - var a2 = this.peekCodePoint(1); - var a3 = this.peekCodePoint(2); - if (isIdentifierStart(a1, a2, a3)) { - var value = this.consumeName(); - return { type: 7 /* AT_KEYWORD_TOKEN */, value: value }; - } - break; - case LEFT_SQUARE_BRACKET: - return LEFT_SQUARE_BRACKET_TOKEN; - case REVERSE_SOLIDUS: - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - break; - case RIGHT_SQUARE_BRACKET: - return RIGHT_SQUARE_BRACKET_TOKEN; - case CIRCUMFLEX_ACCENT: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return PREFIX_MATCH_TOKEN; - } - break; - case LEFT_CURLY_BRACKET: - return LEFT_CURLY_BRACKET_TOKEN; - case RIGHT_CURLY_BRACKET: - return RIGHT_CURLY_BRACKET_TOKEN; - case u: - case U: - var u1 = this.peekCodePoint(0); - var u2 = this.peekCodePoint(1); - if (u1 === PLUS_SIGN && (isHex(u2) || u2 === QUESTION_MARK)) { - this.consumeCodePoint(); - this.consumeUnicodeRangeToken(); - } - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - case VERTICAL_LINE: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return DASH_MATCH_TOKEN; - } - if (this.peekCodePoint(0) === VERTICAL_LINE) { - this.consumeCodePoint(); - return COLUMN_TOKEN; - } - break; - case TILDE: - if 
(this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return INCLUDE_MATCH_TOKEN; - } - break; - case EOF: - return EOF_TOKEN; - } - if (isWhiteSpace(codePoint)) { - this.consumeWhiteSpace(); - return WHITESPACE_TOKEN; - } - if (isDigit(codePoint)) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - if (isNameStartCodePoint(codePoint)) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - return { type: 6 /* DELIM_TOKEN */, value: fromCodePoint$1(codePoint) }; - }; - Tokenizer.prototype.consumeCodePoint = function () { - var value = this._value.shift(); - return typeof value === 'undefined' ? -1 : value; - }; - Tokenizer.prototype.reconsumeCodePoint = function (codePoint) { - this._value.unshift(codePoint); - }; - Tokenizer.prototype.peekCodePoint = function (delta) { - if (delta >= this._value.length) { - return -1; - } - return this._value[delta]; - }; - Tokenizer.prototype.consumeUnicodeRangeToken = function () { - var digits = []; - var codePoint = this.consumeCodePoint(); - while (isHex(codePoint) && digits.length < 6) { - digits.push(codePoint); - codePoint = this.consumeCodePoint(); - } - var questionMarks = false; - while (codePoint === QUESTION_MARK && digits.length < 6) { - digits.push(codePoint); - codePoint = this.consumeCodePoint(); - questionMarks = true; - } - if (questionMarks) { - var start_1 = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? ZERO : digit); })), 16); - var end = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? F : digit); })), 16); - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start_1, end: end }; - } - var start = parseInt(fromCodePoint$1.apply(void 0, digits), 16); - if (this.peekCodePoint(0) === HYPHEN_MINUS && isHex(this.peekCodePoint(1))) { - this.consumeCodePoint(); - codePoint = this.consumeCodePoint(); - var endDigits = []; - while (isHex(codePoint) && endDigits.length < 6) { - endDigits.push(codePoint); - codePoint = this.consumeCodePoint(); - } - var end = parseInt(fromCodePoint$1.apply(void 0, endDigits), 16); - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: end }; - } - else { - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: start }; - } - }; - Tokenizer.prototype.consumeIdentLikeToken = function () { - var value = this.consumeName(); - if (value.toLowerCase() === 'url' && this.peekCodePoint(0) === LEFT_PARENTHESIS) { - this.consumeCodePoint(); - return this.consumeUrlToken(); - } - else if (this.peekCodePoint(0) === LEFT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 19 /* FUNCTION_TOKEN */, value: value }; - } - return { type: 20 /* IDENT_TOKEN */, value: value }; - }; - Tokenizer.prototype.consumeUrlToken = function () { - var value = []; - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF) { - return { type: 22 /* URL_TOKEN */, value: '' }; - } - var next = this.peekCodePoint(0); - if (next === APOSTROPHE || next === QUOTATION_MARK) { - var stringToken = this.consumeStringToken(this.consumeCodePoint()); - if (stringToken.type === 0 /* STRING_TOKEN */) { - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 22 /* URL_TOKEN */, value: stringToken.value }; - } - } - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - while (true) { - var codePoint = this.consumeCodePoint(); - if 
(codePoint === EOF || codePoint === RIGHT_PARENTHESIS) { - return { type: 22 /* URL_TOKEN */, value: fromCodePoint$1.apply(void 0, value) }; - } - else if (isWhiteSpace(codePoint)) { - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 22 /* URL_TOKEN */, value: fromCodePoint$1.apply(void 0, value) }; - } - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - else if (codePoint === QUOTATION_MARK || - codePoint === APOSTROPHE || - codePoint === LEFT_PARENTHESIS || - isNonPrintableCodePoint(codePoint)) { - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - else if (codePoint === REVERSE_SOLIDUS) { - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - value.push(this.consumeEscapedCodePoint()); - } - else { - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - } - else { - value.push(codePoint); - } - } - }; - Tokenizer.prototype.consumeWhiteSpace = function () { - while (isWhiteSpace(this.peekCodePoint(0))) { - this.consumeCodePoint(); - } - }; - Tokenizer.prototype.consumeBadUrlRemnants = function () { - while (true) { - var codePoint = this.consumeCodePoint(); - if (codePoint === RIGHT_PARENTHESIS || codePoint === EOF) { - return; - } - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - this.consumeEscapedCodePoint(); - } - } - }; - Tokenizer.prototype.consumeStringSlice = function (count) { - var SLICE_STACK_SIZE = 50000; - var value = ''; - while (count > 0) { - var amount = Math.min(SLICE_STACK_SIZE, count); - value += fromCodePoint$1.apply(void 0, this._value.splice(0, amount)); - count -= amount; - } - this._value.shift(); - return value; - }; - Tokenizer.prototype.consumeStringToken = function (endingCodePoint) { - var value = ''; - var i = 0; - do { - var codePoint = this._value[i]; - if (codePoint === EOF || codePoint === undefined || codePoint === endingCodePoint) { - value += this.consumeStringSlice(i); - return { type: 0 /* STRING_TOKEN */, value: value }; - } - if (codePoint === LINE_FEED) { - this._value.splice(0, i); - return BAD_STRING_TOKEN; - } - if (codePoint === REVERSE_SOLIDUS) { - var next = this._value[i + 1]; - if (next !== EOF && next !== undefined) { - if (next === LINE_FEED) { - value += this.consumeStringSlice(i); - i = -1; - this._value.shift(); - } - else if (isValidEscape(codePoint, next)) { - value += this.consumeStringSlice(i); - value += fromCodePoint$1(this.consumeEscapedCodePoint()); - i = -1; - } - } - } - i++; - } while (true); - }; - Tokenizer.prototype.consumeNumber = function () { - var repr = []; - var type = FLAG_INTEGER; - var c1 = this.peekCodePoint(0); - if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) { - repr.push(this.consumeCodePoint()); - } - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - if (c1 === FULL_STOP && isDigit(c2)) { - repr.push(this.consumeCodePoint(), this.consumeCodePoint()); - type = FLAG_NUMBER; - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - } - c1 = this.peekCodePoint(0); - c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if ((c1 === E || c1 === e) && (((c2 === PLUS_SIGN || c2 === HYPHEN_MINUS) && isDigit(c3)) || isDigit(c2))) { - repr.push(this.consumeCodePoint(), this.consumeCodePoint()); - type = FLAG_NUMBER; - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - } - return [stringToNumber(repr), 
type]; - }; - Tokenizer.prototype.consumeNumericToken = function () { - var _a = this.consumeNumber(), number = _a[0], flags = _a[1]; - var c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if (isIdentifierStart(c1, c2, c3)) { - var unit = this.consumeName(); - return { type: 15 /* DIMENSION_TOKEN */, number: number, flags: flags, unit: unit }; - } - if (c1 === PERCENTAGE_SIGN) { - this.consumeCodePoint(); - return { type: 16 /* PERCENTAGE_TOKEN */, number: number, flags: flags }; - } - return { type: 17 /* NUMBER_TOKEN */, number: number, flags: flags }; - }; - Tokenizer.prototype.consumeEscapedCodePoint = function () { - var codePoint = this.consumeCodePoint(); - if (isHex(codePoint)) { - var hex = fromCodePoint$1(codePoint); - while (isHex(this.peekCodePoint(0)) && hex.length < 6) { - hex += fromCodePoint$1(this.consumeCodePoint()); - } - if (isWhiteSpace(this.peekCodePoint(0))) { - this.consumeCodePoint(); - } - var hexCodePoint = parseInt(hex, 16); - if (hexCodePoint === 0 || isSurrogateCodePoint(hexCodePoint) || hexCodePoint > 0x10ffff) { - return REPLACEMENT_CHARACTER; - } - return hexCodePoint; - } - if (codePoint === EOF) { - return REPLACEMENT_CHARACTER; - } - return codePoint; - }; - Tokenizer.prototype.consumeName = function () { - var result = ''; - while (true) { - var codePoint = this.consumeCodePoint(); - if (isNameCodePoint(codePoint)) { - result += fromCodePoint$1(codePoint); - } - else if (isValidEscape(codePoint, this.peekCodePoint(0))) { - result += fromCodePoint$1(this.consumeEscapedCodePoint()); - } - else { - this.reconsumeCodePoint(codePoint); - return result; - } - } - }; - return Tokenizer; - }()); - - var Parser = /** @class */ (function () { - function Parser(tokens) { - this._tokens = tokens; - } - Parser.create = function (value) { - var tokenizer = new Tokenizer(); - tokenizer.write(value); - return new Parser(tokenizer.read()); - }; - Parser.parseValue = function (value) { - return Parser.create(value).parseComponentValue(); - }; - Parser.parseValues = function (value) { - return Parser.create(value).parseComponentValues(); - }; - Parser.prototype.parseComponentValue = function () { - var token = this.consumeToken(); - while (token.type === 31 /* WHITESPACE_TOKEN */) { - token = this.consumeToken(); - } - if (token.type === 32 /* EOF_TOKEN */) { - throw new SyntaxError("Error parsing CSS component value, unexpected EOF"); - } - this.reconsumeToken(token); - var value = this.consumeComponentValue(); - do { - token = this.consumeToken(); - } while (token.type === 31 /* WHITESPACE_TOKEN */); - if (token.type === 32 /* EOF_TOKEN */) { - return value; - } - throw new SyntaxError("Error parsing CSS component value, multiple values found when expecting only one"); - }; - Parser.prototype.parseComponentValues = function () { - var values = []; - while (true) { - var value = this.consumeComponentValue(); - if (value.type === 32 /* EOF_TOKEN */) { - return values; - } - values.push(value); - values.push(); - } - }; - Parser.prototype.consumeComponentValue = function () { - var token = this.consumeToken(); - switch (token.type) { - case 11 /* LEFT_CURLY_BRACKET_TOKEN */: - case 28 /* LEFT_SQUARE_BRACKET_TOKEN */: - case 2 /* LEFT_PARENTHESIS_TOKEN */: - return this.consumeSimpleBlock(token.type); - case 19 /* FUNCTION_TOKEN */: - return this.consumeFunction(token); - } - return token; - }; - Parser.prototype.consumeSimpleBlock = function (type) { - var block = { type: type, values: [] }; - var token = 
this.consumeToken(); - while (true) { - if (token.type === 32 /* EOF_TOKEN */ || isEndingTokenFor(token, type)) { - return block; - } - this.reconsumeToken(token); - block.values.push(this.consumeComponentValue()); - token = this.consumeToken(); - } - }; - Parser.prototype.consumeFunction = function (functionToken) { - var cssFunction = { - name: functionToken.value, - values: [], - type: 18 /* FUNCTION */ - }; - while (true) { - var token = this.consumeToken(); - if (token.type === 32 /* EOF_TOKEN */ || token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */) { - return cssFunction; - } - this.reconsumeToken(token); - cssFunction.values.push(this.consumeComponentValue()); - } - }; - Parser.prototype.consumeToken = function () { - var token = this._tokens.shift(); - return typeof token === 'undefined' ? EOF_TOKEN : token; - }; - Parser.prototype.reconsumeToken = function (token) { - this._tokens.unshift(token); - }; - return Parser; - }()); - var isDimensionToken = function (token) { return token.type === 15 /* DIMENSION_TOKEN */; }; - var isNumberToken = function (token) { return token.type === 17 /* NUMBER_TOKEN */; }; - var isIdentToken = function (token) { return token.type === 20 /* IDENT_TOKEN */; }; - var isStringToken = function (token) { return token.type === 0 /* STRING_TOKEN */; }; - var isIdentWithValue = function (token, value) { - return isIdentToken(token) && token.value === value; - }; - var nonWhiteSpace = function (token) { return token.type !== 31 /* WHITESPACE_TOKEN */; }; - var nonFunctionArgSeparator = function (token) { - return token.type !== 31 /* WHITESPACE_TOKEN */ && token.type !== 4 /* COMMA_TOKEN */; - }; - var parseFunctionArgs = function (tokens) { - var args = []; - var arg = []; - tokens.forEach(function (token) { - if (token.type === 4 /* COMMA_TOKEN */) { - if (arg.length === 0) { - throw new Error("Error parsing function args, zero tokens for arg"); - } - args.push(arg); - arg = []; - return; - } - if (token.type !== 31 /* WHITESPACE_TOKEN */) { - arg.push(token); - } - }); - if (arg.length) { - args.push(arg); - } - return args; - }; - var isEndingTokenFor = function (token, type) { - if (type === 11 /* LEFT_CURLY_BRACKET_TOKEN */ && token.type === 12 /* RIGHT_CURLY_BRACKET_TOKEN */) { - return true; - } - if (type === 28 /* LEFT_SQUARE_BRACKET_TOKEN */ && token.type === 29 /* RIGHT_SQUARE_BRACKET_TOKEN */) { - return true; - } - return type === 2 /* LEFT_PARENTHESIS_TOKEN */ && token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */; - }; - - var isLength = function (token) { - return token.type === 17 /* NUMBER_TOKEN */ || token.type === 15 /* DIMENSION_TOKEN */; - }; - - var isLengthPercentage = function (token) { - return token.type === 16 /* PERCENTAGE_TOKEN */ || isLength(token); - }; - var parseLengthPercentageTuple = function (tokens) { - return tokens.length > 1 ? [tokens[0], tokens[1]] : [tokens[0]]; - }; - var ZERO_LENGTH = { - type: 17 /* NUMBER_TOKEN */, - number: 0, - flags: FLAG_INTEGER - }; - var FIFTY_PERCENT = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 50, - flags: FLAG_INTEGER - }; - var HUNDRED_PERCENT = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 100, - flags: FLAG_INTEGER - }; - var getAbsoluteValueForTuple = function (tuple, width, height) { - var x = tuple[0], y = tuple[1]; - return [getAbsoluteValue(x, width), getAbsoluteValue(typeof y !== 'undefined' ? 
y : x, height)]; - }; - var getAbsoluteValue = function (token, parent) { - if (token.type === 16 /* PERCENTAGE_TOKEN */) { - return (token.number / 100) * parent; - } - if (isDimensionToken(token)) { - switch (token.unit) { - case 'rem': - case 'em': - return 16 * token.number; // TODO use correct font-size - case 'px': - default: - return token.number; - } - } - return token.number; - }; - - var DEG = 'deg'; - var GRAD = 'grad'; - var RAD = 'rad'; - var TURN = 'turn'; - var angle = { - name: 'angle', - parse: function (_context, value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - switch (value.unit) { - case DEG: - return (Math.PI * value.number) / 180; - case GRAD: - return (Math.PI / 200) * value.number; - case RAD: - return value.number; - case TURN: - return Math.PI * 2 * value.number; - } - } - throw new Error("Unsupported angle type"); - } - }; - var isAngle = function (value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - if (value.unit === DEG || value.unit === GRAD || value.unit === RAD || value.unit === TURN) { - return true; - } - } - return false; - }; - var parseNamedSide = function (tokens) { - var sideOrCorner = tokens - .filter(isIdentToken) - .map(function (ident) { return ident.value; }) - .join(' '); - switch (sideOrCorner) { - case 'to bottom right': - case 'to right bottom': - case 'left top': - case 'top left': - return [ZERO_LENGTH, ZERO_LENGTH]; - case 'to top': - case 'bottom': - return deg(0); - case 'to bottom left': - case 'to left bottom': - case 'right top': - case 'top right': - return [ZERO_LENGTH, HUNDRED_PERCENT]; - case 'to right': - case 'left': - return deg(90); - case 'to top left': - case 'to left top': - case 'right bottom': - case 'bottom right': - return [HUNDRED_PERCENT, HUNDRED_PERCENT]; - case 'to bottom': - case 'top': - return deg(180); - case 'to top right': - case 'to right top': - case 'left bottom': - case 'bottom left': - return [HUNDRED_PERCENT, ZERO_LENGTH]; - case 'to left': - case 'right': - return deg(270); - } - return 0; - }; - var deg = function (deg) { return (Math.PI * deg) / 180; }; - - var color$1 = { - name: 'color', - parse: function (context, value) { - if (value.type === 18 /* FUNCTION */) { - var colorFunction = SUPPORTED_COLOR_FUNCTIONS[value.name]; - if (typeof colorFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported color function \"" + value.name + "\""); - } - return colorFunction(context, value.values); - } - if (value.type === 5 /* HASH_TOKEN */) { - if (value.value.length === 3) { - var r = value.value.substring(0, 1); - var g = value.value.substring(1, 2); - var b = value.value.substring(2, 3); - return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), 1); - } - if (value.value.length === 4) { - var r = value.value.substring(0, 1); - var g = value.value.substring(1, 2); - var b = value.value.substring(2, 3); - var a = value.value.substring(3, 4); - return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), parseInt(a + a, 16) / 255); - } - if (value.value.length === 6) { - var r = value.value.substring(0, 2); - var g = value.value.substring(2, 4); - var b = value.value.substring(4, 6); - return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), 1); - } - if (value.value.length === 8) { - var r = value.value.substring(0, 2); - var g = value.value.substring(2, 4); - var b = value.value.substring(4, 6); - var a = value.value.substring(6, 8); - return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), parseInt(a, 16) / 255); - } 
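- // 3-, 4-, 6- and 8-digit hex values are expanded to full channels and packed as 0xRRGGBBAA by pack() below,
- // e.g. a #ff0000 hash token yields pack(255, 0, 0, 1) === 0xff0000ff (the same value as COLORS.RED).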
- } - if (value.type === 20 /* IDENT_TOKEN */) { - var namedColor = COLORS[value.value.toUpperCase()]; - if (typeof namedColor !== 'undefined') { - return namedColor; - } - } - return COLORS.TRANSPARENT; - } - }; - var isTransparent = function (color) { return (0xff & color) === 0; }; - var asString = function (color) { - var alpha = 0xff & color; - var blue = 0xff & (color >> 8); - var green = 0xff & (color >> 16); - var red = 0xff & (color >> 24); - return alpha < 255 ? "rgba(" + red + "," + green + "," + blue + "," + alpha / 255 + ")" : "rgb(" + red + "," + green + "," + blue + ")"; - }; - var pack = function (r, g, b, a) { - return ((r << 24) | (g << 16) | (b << 8) | (Math.round(a * 255) << 0)) >>> 0; - }; - var getTokenColorValue = function (token, i) { - if (token.type === 17 /* NUMBER_TOKEN */) { - return token.number; - } - if (token.type === 16 /* PERCENTAGE_TOKEN */) { - var max = i === 3 ? 1 : 255; - return i === 3 ? (token.number / 100) * max : Math.round((token.number / 100) * max); - } - return 0; - }; - var rgb = function (_context, args) { - var tokens = args.filter(nonFunctionArgSeparator); - if (tokens.length === 3) { - var _a = tokens.map(getTokenColorValue), r = _a[0], g = _a[1], b = _a[2]; - return pack(r, g, b, 1); - } - if (tokens.length === 4) { - var _b = tokens.map(getTokenColorValue), r = _b[0], g = _b[1], b = _b[2], a = _b[3]; - return pack(r, g, b, a); - } - return 0; - }; - function hue2rgb(t1, t2, hue) { - if (hue < 0) { - hue += 1; - } - if (hue >= 1) { - hue -= 1; - } - if (hue < 1 / 6) { - return (t2 - t1) * hue * 6 + t1; - } - else if (hue < 1 / 2) { - return t2; - } - else if (hue < 2 / 3) { - return (t2 - t1) * 6 * (2 / 3 - hue) + t1; - } - else { - return t1; - } - } - var hsl = function (context, args) { - var tokens = args.filter(nonFunctionArgSeparator); - var hue = tokens[0], saturation = tokens[1], lightness = tokens[2], alpha = tokens[3]; - var h = (hue.type === 17 /* NUMBER_TOKEN */ ? deg(hue.number) : angle.parse(context, hue)) / (Math.PI * 2); - var s = isLengthPercentage(saturation) ? saturation.number / 100 : 0; - var l = isLengthPercentage(lightness) ? lightness.number / 100 : 0; - var a = typeof alpha !== 'undefined' && isLengthPercentage(alpha) ? getAbsoluteValue(alpha, 1) : 1; - if (s === 0) { - return pack(l * 255, l * 255, l * 255, 1); - } - var t2 = l <= 0.5 ? 
l * (s + 1) : l + s - l * s; - var t1 = l * 2 - t2; - var r = hue2rgb(t1, t2, h + 1 / 3); - var g = hue2rgb(t1, t2, h); - var b = hue2rgb(t1, t2, h - 1 / 3); - return pack(r * 255, g * 255, b * 255, a); - }; - var SUPPORTED_COLOR_FUNCTIONS = { - hsl: hsl, - hsla: hsl, - rgb: rgb, - rgba: rgb - }; - var parseColor = function (context, value) { - return color$1.parse(context, Parser.create(value).parseComponentValue()); - }; - var COLORS = { - ALICEBLUE: 0xf0f8ffff, - ANTIQUEWHITE: 0xfaebd7ff, - AQUA: 0x00ffffff, - AQUAMARINE: 0x7fffd4ff, - AZURE: 0xf0ffffff, - BEIGE: 0xf5f5dcff, - BISQUE: 0xffe4c4ff, - BLACK: 0x000000ff, - BLANCHEDALMOND: 0xffebcdff, - BLUE: 0x0000ffff, - BLUEVIOLET: 0x8a2be2ff, - BROWN: 0xa52a2aff, - BURLYWOOD: 0xdeb887ff, - CADETBLUE: 0x5f9ea0ff, - CHARTREUSE: 0x7fff00ff, - CHOCOLATE: 0xd2691eff, - CORAL: 0xff7f50ff, - CORNFLOWERBLUE: 0x6495edff, - CORNSILK: 0xfff8dcff, - CRIMSON: 0xdc143cff, - CYAN: 0x00ffffff, - DARKBLUE: 0x00008bff, - DARKCYAN: 0x008b8bff, - DARKGOLDENROD: 0xb886bbff, - DARKGRAY: 0xa9a9a9ff, - DARKGREEN: 0x006400ff, - DARKGREY: 0xa9a9a9ff, - DARKKHAKI: 0xbdb76bff, - DARKMAGENTA: 0x8b008bff, - DARKOLIVEGREEN: 0x556b2fff, - DARKORANGE: 0xff8c00ff, - DARKORCHID: 0x9932ccff, - DARKRED: 0x8b0000ff, - DARKSALMON: 0xe9967aff, - DARKSEAGREEN: 0x8fbc8fff, - DARKSLATEBLUE: 0x483d8bff, - DARKSLATEGRAY: 0x2f4f4fff, - DARKSLATEGREY: 0x2f4f4fff, - DARKTURQUOISE: 0x00ced1ff, - DARKVIOLET: 0x9400d3ff, - DEEPPINK: 0xff1493ff, - DEEPSKYBLUE: 0x00bfffff, - DIMGRAY: 0x696969ff, - DIMGREY: 0x696969ff, - DODGERBLUE: 0x1e90ffff, - FIREBRICK: 0xb22222ff, - FLORALWHITE: 0xfffaf0ff, - FORESTGREEN: 0x228b22ff, - FUCHSIA: 0xff00ffff, - GAINSBORO: 0xdcdcdcff, - GHOSTWHITE: 0xf8f8ffff, - GOLD: 0xffd700ff, - GOLDENROD: 0xdaa520ff, - GRAY: 0x808080ff, - GREEN: 0x008000ff, - GREENYELLOW: 0xadff2fff, - GREY: 0x808080ff, - HONEYDEW: 0xf0fff0ff, - HOTPINK: 0xff69b4ff, - INDIANRED: 0xcd5c5cff, - INDIGO: 0x4b0082ff, - IVORY: 0xfffff0ff, - KHAKI: 0xf0e68cff, - LAVENDER: 0xe6e6faff, - LAVENDERBLUSH: 0xfff0f5ff, - LAWNGREEN: 0x7cfc00ff, - LEMONCHIFFON: 0xfffacdff, - LIGHTBLUE: 0xadd8e6ff, - LIGHTCORAL: 0xf08080ff, - LIGHTCYAN: 0xe0ffffff, - LIGHTGOLDENRODYELLOW: 0xfafad2ff, - LIGHTGRAY: 0xd3d3d3ff, - LIGHTGREEN: 0x90ee90ff, - LIGHTGREY: 0xd3d3d3ff, - LIGHTPINK: 0xffb6c1ff, - LIGHTSALMON: 0xffa07aff, - LIGHTSEAGREEN: 0x20b2aaff, - LIGHTSKYBLUE: 0x87cefaff, - LIGHTSLATEGRAY: 0x778899ff, - LIGHTSLATEGREY: 0x778899ff, - LIGHTSTEELBLUE: 0xb0c4deff, - LIGHTYELLOW: 0xffffe0ff, - LIME: 0x00ff00ff, - LIMEGREEN: 0x32cd32ff, - LINEN: 0xfaf0e6ff, - MAGENTA: 0xff00ffff, - MAROON: 0x800000ff, - MEDIUMAQUAMARINE: 0x66cdaaff, - MEDIUMBLUE: 0x0000cdff, - MEDIUMORCHID: 0xba55d3ff, - MEDIUMPURPLE: 0x9370dbff, - MEDIUMSEAGREEN: 0x3cb371ff, - MEDIUMSLATEBLUE: 0x7b68eeff, - MEDIUMSPRINGGREEN: 0x00fa9aff, - MEDIUMTURQUOISE: 0x48d1ccff, - MEDIUMVIOLETRED: 0xc71585ff, - MIDNIGHTBLUE: 0x191970ff, - MINTCREAM: 0xf5fffaff, - MISTYROSE: 0xffe4e1ff, - MOCCASIN: 0xffe4b5ff, - NAVAJOWHITE: 0xffdeadff, - NAVY: 0x000080ff, - OLDLACE: 0xfdf5e6ff, - OLIVE: 0x808000ff, - OLIVEDRAB: 0x6b8e23ff, - ORANGE: 0xffa500ff, - ORANGERED: 0xff4500ff, - ORCHID: 0xda70d6ff, - PALEGOLDENROD: 0xeee8aaff, - PALEGREEN: 0x98fb98ff, - PALETURQUOISE: 0xafeeeeff, - PALEVIOLETRED: 0xdb7093ff, - PAPAYAWHIP: 0xffefd5ff, - PEACHPUFF: 0xffdab9ff, - PERU: 0xcd853fff, - PINK: 0xffc0cbff, - PLUM: 0xdda0ddff, - POWDERBLUE: 0xb0e0e6ff, - PURPLE: 0x800080ff, - REBECCAPURPLE: 0x663399ff, - RED: 0xff0000ff, - ROSYBROWN: 0xbc8f8fff, - ROYALBLUE: 0x4169e1ff, - 
SADDLEBROWN: 0x8b4513ff, - SALMON: 0xfa8072ff, - SANDYBROWN: 0xf4a460ff, - SEAGREEN: 0x2e8b57ff, - SEASHELL: 0xfff5eeff, - SIENNA: 0xa0522dff, - SILVER: 0xc0c0c0ff, - SKYBLUE: 0x87ceebff, - SLATEBLUE: 0x6a5acdff, - SLATEGRAY: 0x708090ff, - SLATEGREY: 0x708090ff, - SNOW: 0xfffafaff, - SPRINGGREEN: 0x00ff7fff, - STEELBLUE: 0x4682b4ff, - TAN: 0xd2b48cff, - TEAL: 0x008080ff, - THISTLE: 0xd8bfd8ff, - TOMATO: 0xff6347ff, - TRANSPARENT: 0x00000000, - TURQUOISE: 0x40e0d0ff, - VIOLET: 0xee82eeff, - WHEAT: 0xf5deb3ff, - WHITE: 0xffffffff, - WHITESMOKE: 0xf5f5f5ff, - YELLOW: 0xffff00ff, - YELLOWGREEN: 0x9acd32ff - }; - - var backgroundClip = { - name: 'background-clip', - initialValue: 'border-box', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.map(function (token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'padding-box': - return 1 /* PADDING_BOX */; - case 'content-box': - return 2 /* CONTENT_BOX */; - } - } - return 0 /* BORDER_BOX */; - }); - } - }; - - var backgroundColor = { - name: "background-color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var parseColorStop = function (context, args) { - var color = color$1.parse(context, args[0]); - var stop = args[1]; - return stop && isLengthPercentage(stop) ? { color: color, stop: stop } : { color: color, stop: null }; - }; - var processColorStops = function (stops, lineLength) { - var first = stops[0]; - var last = stops[stops.length - 1]; - if (first.stop === null) { - first.stop = ZERO_LENGTH; - } - if (last.stop === null) { - last.stop = HUNDRED_PERCENT; - } - var processStops = []; - var previous = 0; - for (var i = 0; i < stops.length; i++) { - var stop_1 = stops[i].stop; - if (stop_1 !== null) { - var absoluteValue = getAbsoluteValue(stop_1, lineLength); - if (absoluteValue > previous) { - processStops.push(absoluteValue); - } - else { - processStops.push(previous); - } - previous = absoluteValue; - } - else { - processStops.push(null); - } - } - var gapBegin = null; - for (var i = 0; i < processStops.length; i++) { - var stop_2 = processStops[i]; - if (stop_2 === null) { - if (gapBegin === null) { - gapBegin = i; - } - } - else if (gapBegin !== null) { - var gapLength = i - gapBegin; - var beforeGap = processStops[gapBegin - 1]; - var gapValue = (stop_2 - beforeGap) / (gapLength + 1); - for (var g = 1; g <= gapLength; g++) { - processStops[gapBegin + g - 1] = gapValue * g; - } - gapBegin = null; - } - } - return stops.map(function (_a, i) { - var color = _a.color; - return { color: color, stop: Math.max(Math.min(1, processStops[i] / lineLength), 0) }; - }); - }; - var getAngleFromCorner = function (corner, width, height) { - var centerX = width / 2; - var centerY = height / 2; - var x = getAbsoluteValue(corner[0], width) - centerX; - var y = centerY - getAbsoluteValue(corner[1], height); - return (Math.atan2(y, x) + Math.PI * 2) % (Math.PI * 2); - }; - var calculateGradientDirection = function (angle, width, height) { - var radian = typeof angle === 'number' ? 
angle : getAngleFromCorner(angle, width, height); - var lineLength = Math.abs(width * Math.sin(radian)) + Math.abs(height * Math.cos(radian)); - var halfWidth = width / 2; - var halfHeight = height / 2; - var halfLineLength = lineLength / 2; - var yDiff = Math.sin(radian - Math.PI / 2) * halfLineLength; - var xDiff = Math.cos(radian - Math.PI / 2) * halfLineLength; - return [lineLength, halfWidth - xDiff, halfWidth + xDiff, halfHeight - yDiff, halfHeight + yDiff]; - }; - var distance = function (a, b) { return Math.sqrt(a * a + b * b); }; - var findCorner = function (width, height, x, y, closest) { - var corners = [ - [0, 0], - [0, height], - [width, 0], - [width, height] - ]; - return corners.reduce(function (stat, corner) { - var cx = corner[0], cy = corner[1]; - var d = distance(x - cx, y - cy); - if (closest ? d < stat.optimumDistance : d > stat.optimumDistance) { - return { - optimumCorner: corner, - optimumDistance: d - }; - } - return stat; - }, { - optimumDistance: closest ? Infinity : -Infinity, - optimumCorner: null - }).optimumCorner; - }; - var calculateRadius = function (gradient, x, y, width, height) { - var rx = 0; - var ry = 0; - switch (gradient.size) { - case 0 /* CLOSEST_SIDE */: - // The ending shape is sized so that that it exactly meets the side of the gradient box closest to the gradient’s center. - // If the shape is an ellipse, it exactly meets the closest side in each dimension. - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.min(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - rx = Math.min(Math.abs(x), Math.abs(x - width)); - ry = Math.min(Math.abs(y), Math.abs(y - height)); - } - break; - case 2 /* CLOSEST_CORNER */: - // The ending shape is sized so that that it passes through the corner of the gradient box closest to the gradient’s center. - // If the shape is an ellipse, the ending shape is given the same aspect-ratio it would have if closest-side were specified. - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.min(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - // Compute the ratio ry/rx (which is to be the same as for "closest-side") - var c = Math.min(Math.abs(y), Math.abs(y - height)) / Math.min(Math.abs(x), Math.abs(x - width)); - var _a = findCorner(width, height, x, y, true), cx = _a[0], cy = _a[1]; - rx = distance(cx - x, (cy - y) / c); - ry = c * rx; - } - break; - case 1 /* FARTHEST_SIDE */: - // Same as closest-side, except the ending shape is sized based on the farthest side(s) - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.max(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - rx = Math.max(Math.abs(x), Math.abs(x - width)); - ry = Math.max(Math.abs(y), Math.abs(y - height)); - } - break; - case 3 /* FARTHEST_CORNER */: - // Same as closest-corner, except the ending shape is sized based on the farthest corner. - // If the shape is an ellipse, the ending shape is given the same aspect ratio it would have if farthest-side were specified. 
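- // (For a circle this is simply the largest of the four corner distances; for an ellipse, findCorner(..., false)
- // picks the farthest corner and the ry/rx ratio is taken from the farthest-side case.)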
- if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.max(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - // Compute the ratio ry/rx (which is to be the same as for "farthest-side") - var c = Math.max(Math.abs(y), Math.abs(y - height)) / Math.max(Math.abs(x), Math.abs(x - width)); - var _b = findCorner(width, height, x, y, false), cx = _b[0], cy = _b[1]; - rx = distance(cx - x, (cy - y) / c); - ry = c * rx; - } - break; - } - if (Array.isArray(gradient.size)) { - rx = getAbsoluteValue(gradient.size[0], width); - ry = gradient.size.length === 2 ? getAbsoluteValue(gradient.size[1], height) : rx; - } - return [rx, ry]; - }; - - var linearGradient = function (context, tokens) { - var angle$1 = deg(180); - var stops = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - if (i === 0) { - var firstToken = arg[0]; - if (firstToken.type === 20 /* IDENT_TOKEN */ && firstToken.value === 'to') { - angle$1 = parseNamedSide(arg); - return; - } - else if (isAngle(firstToken)) { - angle$1 = angle.parse(context, firstToken); - return; - } - } - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - }); - return { angle: angle$1, stops: stops, type: 1 /* LINEAR_GRADIENT */ }; - }; - - var prefixLinearGradient = function (context, tokens) { - var angle$1 = deg(180); - var stops = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - if (i === 0) { - var firstToken = arg[0]; - if (firstToken.type === 20 /* IDENT_TOKEN */ && - ['top', 'left', 'right', 'bottom'].indexOf(firstToken.value) !== -1) { - angle$1 = parseNamedSide(arg); - return; - } - else if (isAngle(firstToken)) { - angle$1 = (angle.parse(context, firstToken) + deg(270)) % deg(360); - return; - } - } - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - }); - return { - angle: angle$1, - stops: stops, - type: 1 /* LINEAR_GRADIENT */ - }; - }; - - var webkitGradient = function (context, tokens) { - var angle = deg(180); - var stops = []; - var type = 1 /* LINEAR_GRADIENT */; - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var firstToken = arg[0]; - if (i === 0) { - if (isIdentToken(firstToken) && firstToken.value === 'linear') { - type = 1 /* LINEAR_GRADIENT */; - return; - } - else if (isIdentToken(firstToken) && firstToken.value === 'radial') { - type = 2 /* RADIAL_GRADIENT */; - return; - } - } - if (firstToken.type === 18 /* FUNCTION */) { - if (firstToken.name === 'from') { - var color = color$1.parse(context, firstToken.values[0]); - stops.push({ stop: ZERO_LENGTH, color: color }); - } - else if (firstToken.name === 'to') { - var color = color$1.parse(context, firstToken.values[0]); - stops.push({ stop: HUNDRED_PERCENT, color: color }); - } - else if (firstToken.name === 'color-stop') { - var values = firstToken.values.filter(nonFunctionArgSeparator); - if (values.length === 2) { - var color = color$1.parse(context, values[1]); - var stop_1 = values[0]; - if (isNumberToken(stop_1)) { - stops.push({ - stop: { type: 16 /* PERCENTAGE_TOKEN */, number: stop_1.number * 100, flags: stop_1.flags }, - color: color - }); - } - } - } - } - }); - return type === 1 /* LINEAR_GRADIENT */ - ? 
{ - angle: (angle + deg(180)) % deg(360), - stops: stops, - type: type - } - : { size: size, shape: shape, stops: stops, position: position, type: type }; - }; - - var CLOSEST_SIDE = 'closest-side'; - var FARTHEST_SIDE = 'farthest-side'; - var CLOSEST_CORNER = 'closest-corner'; - var FARTHEST_CORNER = 'farthest-corner'; - var CIRCLE = 'circle'; - var ELLIPSE = 'ellipse'; - var COVER = 'cover'; - var CONTAIN = 'contain'; - var radialGradient = function (context, tokens) { - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var stops = []; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var isColorStop = true; - if (i === 0) { - var isAtPosition_1 = false; - isColorStop = arg.reduce(function (acc, token) { - if (isAtPosition_1) { - if (isIdentToken(token)) { - switch (token.value) { - case 'center': - position.push(FIFTY_PERCENT); - return acc; - case 'top': - case 'left': - position.push(ZERO_LENGTH); - return acc; - case 'right': - case 'bottom': - position.push(HUNDRED_PERCENT); - return acc; - } - } - else if (isLengthPercentage(token) || isLength(token)) { - position.push(token); - } - } - else if (isIdentToken(token)) { - switch (token.value) { - case CIRCLE: - shape = 0 /* CIRCLE */; - return false; - case ELLIPSE: - shape = 1 /* ELLIPSE */; - return false; - case 'at': - isAtPosition_1 = true; - return false; - case CLOSEST_SIDE: - size = 0 /* CLOSEST_SIDE */; - return false; - case COVER: - case FARTHEST_SIDE: - size = 1 /* FARTHEST_SIDE */; - return false; - case CONTAIN: - case CLOSEST_CORNER: - size = 2 /* CLOSEST_CORNER */; - return false; - case FARTHEST_CORNER: - size = 3 /* FARTHEST_CORNER */; - return false; - } - } - else if (isLength(token) || isLengthPercentage(token)) { - if (!Array.isArray(size)) { - size = []; - } - size.push(token); - return false; - } - return acc; - }, isColorStop); - } - if (isColorStop) { - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - } - }); - return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ }; - }; - - var prefixRadialGradient = function (context, tokens) { - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var stops = []; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var isColorStop = true; - if (i === 0) { - isColorStop = arg.reduce(function (acc, token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'center': - position.push(FIFTY_PERCENT); - return false; - case 'top': - case 'left': - position.push(ZERO_LENGTH); - return false; - case 'right': - case 'bottom': - position.push(HUNDRED_PERCENT); - return false; - } - } - else if (isLengthPercentage(token) || isLength(token)) { - position.push(token); - return false; - } - return acc; - }, isColorStop); - } - else if (i === 1) { - isColorStop = arg.reduce(function (acc, token) { - if (isIdentToken(token)) { - switch (token.value) { - case CIRCLE: - shape = 0 /* CIRCLE */; - return false; - case ELLIPSE: - shape = 1 /* ELLIPSE */; - return false; - case CONTAIN: - case CLOSEST_SIDE: - size = 0 /* CLOSEST_SIDE */; - return false; - case FARTHEST_SIDE: - size = 1 /* FARTHEST_SIDE */; - return false; - case CLOSEST_CORNER: - size = 2 /* CLOSEST_CORNER */; - return false; - case COVER: - case FARTHEST_CORNER: - size = 3 /* FARTHEST_CORNER */; - return false; - } - } - else if (isLength(token) || isLengthPercentage(token)) { - if (!Array.isArray(size)) { - size = []; - } - size.push(token); - return false; - 
} - return acc; - }, isColorStop); - } - if (isColorStop) { - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - } - }); - return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ }; - }; - - var isLinearGradient = function (background) { - return background.type === 1 /* LINEAR_GRADIENT */; - }; - var isRadialGradient = function (background) { - return background.type === 2 /* RADIAL_GRADIENT */; - }; - var image = { - name: 'image', - parse: function (context, value) { - if (value.type === 22 /* URL_TOKEN */) { - var image_1 = { url: value.value, type: 0 /* URL */ }; - context.cache.addImage(value.value); - return image_1; - } - if (value.type === 18 /* FUNCTION */) { - var imageFunction = SUPPORTED_IMAGE_FUNCTIONS[value.name]; - if (typeof imageFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported image function \"" + value.name + "\""); - } - return imageFunction(context, value.values); - } - throw new Error("Unsupported image type " + value.type); - } - }; - function isSupportedImage(value) { - return (!(value.type === 20 /* IDENT_TOKEN */ && value.value === 'none') && - (value.type !== 18 /* FUNCTION */ || !!SUPPORTED_IMAGE_FUNCTIONS[value.name])); - } - var SUPPORTED_IMAGE_FUNCTIONS = { - 'linear-gradient': linearGradient, - '-moz-linear-gradient': prefixLinearGradient, - '-ms-linear-gradient': prefixLinearGradient, - '-o-linear-gradient': prefixLinearGradient, - '-webkit-linear-gradient': prefixLinearGradient, - 'radial-gradient': radialGradient, - '-moz-radial-gradient': prefixRadialGradient, - '-ms-radial-gradient': prefixRadialGradient, - '-o-radial-gradient': prefixRadialGradient, - '-webkit-radial-gradient': prefixRadialGradient, - '-webkit-gradient': webkitGradient - }; - - var backgroundImage = { - name: 'background-image', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (context, tokens) { - if (tokens.length === 0) { - return []; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return []; - } - return tokens - .filter(function (value) { return nonFunctionArgSeparator(value) && isSupportedImage(value); }) - .map(function (value) { return image.parse(context, value); }); - } - }; - - var backgroundOrigin = { - name: 'background-origin', - initialValue: 'border-box', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.map(function (token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'padding-box': - return 1 /* PADDING_BOX */; - case 'content-box': - return 2 /* CONTENT_BOX */; - } - } - return 0 /* BORDER_BOX */; - }); - } - }; - - var backgroundPosition = { - name: 'background-position', - initialValue: '0% 0%', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens) - .map(function (values) { return values.filter(isLengthPercentage); }) - .map(parseLengthPercentageTuple); - } - }; - - var backgroundRepeat = { - name: 'background-repeat', - initialValue: 'repeat', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens) - .map(function (values) { - return values - .filter(isIdentToken) - .map(function (token) { return token.value; }) - .join(' '); - }) - .map(parseBackgroundRepeat); - } - }; - var parseBackgroundRepeat = function (value) { - switch (value) { - case 'no-repeat': - return 1 /* NO_REPEAT */; - case 'repeat-x': - 
case 'repeat no-repeat': - return 2 /* REPEAT_X */; - case 'repeat-y': - case 'no-repeat repeat': - return 3 /* REPEAT_Y */; - case 'repeat': - default: - return 0 /* REPEAT */; - } - }; - - var BACKGROUND_SIZE; - (function (BACKGROUND_SIZE) { - BACKGROUND_SIZE["AUTO"] = "auto"; - BACKGROUND_SIZE["CONTAIN"] = "contain"; - BACKGROUND_SIZE["COVER"] = "cover"; - })(BACKGROUND_SIZE || (BACKGROUND_SIZE = {})); - var backgroundSize = { - name: 'background-size', - initialValue: '0', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens).map(function (values) { return values.filter(isBackgroundSizeInfoToken); }); - } - }; - var isBackgroundSizeInfoToken = function (value) { - return isIdentToken(value) || isLengthPercentage(value); - }; - - var borderColorForSide = function (side) { return ({ - name: "border-" + side + "-color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }); }; - var borderTopColor = borderColorForSide('top'); - var borderRightColor = borderColorForSide('right'); - var borderBottomColor = borderColorForSide('bottom'); - var borderLeftColor = borderColorForSide('left'); - - var borderRadiusForSide = function (side) { return ({ - name: "border-radius-" + side, - initialValue: '0 0', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseLengthPercentageTuple(tokens.filter(isLengthPercentage)); - } - }); }; - var borderTopLeftRadius = borderRadiusForSide('top-left'); - var borderTopRightRadius = borderRadiusForSide('top-right'); - var borderBottomRightRadius = borderRadiusForSide('bottom-right'); - var borderBottomLeftRadius = borderRadiusForSide('bottom-left'); - - var borderStyleForSide = function (side) { return ({ - name: "border-" + side + "-style", - initialValue: 'solid', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, style) { - switch (style) { - case 'none': - return 0 /* NONE */; - case 'dashed': - return 2 /* DASHED */; - case 'dotted': - return 3 /* DOTTED */; - case 'double': - return 4 /* DOUBLE */; - } - return 1 /* SOLID */; - } - }); }; - var borderTopStyle = borderStyleForSide('top'); - var borderRightStyle = borderStyleForSide('right'); - var borderBottomStyle = borderStyleForSide('bottom'); - var borderLeftStyle = borderStyleForSide('left'); - - var borderWidthForSide = function (side) { return ({ - name: "border-" + side + "-width", - initialValue: '0', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isDimensionToken(token)) { - return token.number; - } - return 0; - } - }); }; - var borderTopWidth = borderWidthForSide('top'); - var borderRightWidth = borderWidthForSide('right'); - var borderBottomWidth = borderWidthForSide('bottom'); - var borderLeftWidth = borderWidthForSide('left'); - - var color = { - name: "color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var direction = { - name: 'direction', - initialValue: 'ltr', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, direction) { - switch (direction) { - case 'rtl': - return 1 /* RTL */; - case 'ltr': - default: - return 0 /* LTR */; - } - } - }; - - var display = { - name: 'display', - initialValue: 'inline-block', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).reduce(function (bit, token) { - return bit | parseDisplayValue(token.value); - }, 0 /* 
NONE */); - } - }; - var parseDisplayValue = function (display) { - switch (display) { - case 'block': - case '-webkit-box': - return 2 /* BLOCK */; - case 'inline': - return 4 /* INLINE */; - case 'run-in': - return 8 /* RUN_IN */; - case 'flow': - return 16 /* FLOW */; - case 'flow-root': - return 32 /* FLOW_ROOT */; - case 'table': - return 64 /* TABLE */; - case 'flex': - case '-webkit-flex': - return 128 /* FLEX */; - case 'grid': - case '-ms-grid': - return 256 /* GRID */; - case 'ruby': - return 512 /* RUBY */; - case 'subgrid': - return 1024 /* SUBGRID */; - case 'list-item': - return 2048 /* LIST_ITEM */; - case 'table-row-group': - return 4096 /* TABLE_ROW_GROUP */; - case 'table-header-group': - return 8192 /* TABLE_HEADER_GROUP */; - case 'table-footer-group': - return 16384 /* TABLE_FOOTER_GROUP */; - case 'table-row': - return 32768 /* TABLE_ROW */; - case 'table-cell': - return 65536 /* TABLE_CELL */; - case 'table-column-group': - return 131072 /* TABLE_COLUMN_GROUP */; - case 'table-column': - return 262144 /* TABLE_COLUMN */; - case 'table-caption': - return 524288 /* TABLE_CAPTION */; - case 'ruby-base': - return 1048576 /* RUBY_BASE */; - case 'ruby-text': - return 2097152 /* RUBY_TEXT */; - case 'ruby-base-container': - return 4194304 /* RUBY_BASE_CONTAINER */; - case 'ruby-text-container': - return 8388608 /* RUBY_TEXT_CONTAINER */; - case 'contents': - return 16777216 /* CONTENTS */; - case 'inline-block': - return 33554432 /* INLINE_BLOCK */; - case 'inline-list-item': - return 67108864 /* INLINE_LIST_ITEM */; - case 'inline-table': - return 134217728 /* INLINE_TABLE */; - case 'inline-flex': - return 268435456 /* INLINE_FLEX */; - case 'inline-grid': - return 536870912 /* INLINE_GRID */; - } - return 0 /* NONE */; - }; - - var float = { - name: 'float', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, float) { - switch (float) { - case 'left': - return 1 /* LEFT */; - case 'right': - return 2 /* RIGHT */; - case 'inline-start': - return 3 /* INLINE_START */; - case 'inline-end': - return 4 /* INLINE_END */; - } - return 0 /* NONE */; - } - }; - - var letterSpacing = { - name: 'letter-spacing', - initialValue: '0', - prefix: false, - type: 0 /* VALUE */, - parse: function (_context, token) { - if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'normal') { - return 0; - } - if (token.type === 17 /* NUMBER_TOKEN */) { - return token.number; - } - if (token.type === 15 /* DIMENSION_TOKEN */) { - return token.number; - } - return 0; - } - }; - - var LINE_BREAK; - (function (LINE_BREAK) { - LINE_BREAK["NORMAL"] = "normal"; - LINE_BREAK["STRICT"] = "strict"; - })(LINE_BREAK || (LINE_BREAK = {})); - var lineBreak = { - name: 'line-break', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, lineBreak) { - switch (lineBreak) { - case 'strict': - return LINE_BREAK.STRICT; - case 'normal': - default: - return LINE_BREAK.NORMAL; - } - } - }; - - var lineHeight = { - name: 'line-height', - initialValue: 'normal', - prefix: false, - type: 4 /* TOKEN_VALUE */ - }; - var computeLineHeight = function (token, fontSize) { - if (isIdentToken(token) && token.value === 'normal') { - return 1.2 * fontSize; - } - else if (token.type === 17 /* NUMBER_TOKEN */) { - return fontSize * token.number; - } - else if (isLengthPercentage(token)) { - return getAbsoluteValue(token, fontSize); - } - return fontSize; - }; - - var listStyleImage = { - name: 'list-style-image', - initialValue: 
'none', - type: 0 /* VALUE */, - prefix: false, - parse: function (context, token) { - if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') { - return null; - } - return image.parse(context, token); - } - }; - - var listStylePosition = { - name: 'list-style-position', - initialValue: 'outside', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, position) { - switch (position) { - case 'inside': - return 0 /* INSIDE */; - case 'outside': - default: - return 1 /* OUTSIDE */; - } - } - }; - - var listStyleType = { - name: 'list-style-type', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, type) { - switch (type) { - case 'disc': - return 0 /* DISC */; - case 'circle': - return 1 /* CIRCLE */; - case 'square': - return 2 /* SQUARE */; - case 'decimal': - return 3 /* DECIMAL */; - case 'cjk-decimal': - return 4 /* CJK_DECIMAL */; - case 'decimal-leading-zero': - return 5 /* DECIMAL_LEADING_ZERO */; - case 'lower-roman': - return 6 /* LOWER_ROMAN */; - case 'upper-roman': - return 7 /* UPPER_ROMAN */; - case 'lower-greek': - return 8 /* LOWER_GREEK */; - case 'lower-alpha': - return 9 /* LOWER_ALPHA */; - case 'upper-alpha': - return 10 /* UPPER_ALPHA */; - case 'arabic-indic': - return 11 /* ARABIC_INDIC */; - case 'armenian': - return 12 /* ARMENIAN */; - case 'bengali': - return 13 /* BENGALI */; - case 'cambodian': - return 14 /* CAMBODIAN */; - case 'cjk-earthly-branch': - return 15 /* CJK_EARTHLY_BRANCH */; - case 'cjk-heavenly-stem': - return 16 /* CJK_HEAVENLY_STEM */; - case 'cjk-ideographic': - return 17 /* CJK_IDEOGRAPHIC */; - case 'devanagari': - return 18 /* DEVANAGARI */; - case 'ethiopic-numeric': - return 19 /* ETHIOPIC_NUMERIC */; - case 'georgian': - return 20 /* GEORGIAN */; - case 'gujarati': - return 21 /* GUJARATI */; - case 'gurmukhi': - return 22 /* GURMUKHI */; - case 'hebrew': - return 22 /* HEBREW */; - case 'hiragana': - return 23 /* HIRAGANA */; - case 'hiragana-iroha': - return 24 /* HIRAGANA_IROHA */; - case 'japanese-formal': - return 25 /* JAPANESE_FORMAL */; - case 'japanese-informal': - return 26 /* JAPANESE_INFORMAL */; - case 'kannada': - return 27 /* KANNADA */; - case 'katakana': - return 28 /* KATAKANA */; - case 'katakana-iroha': - return 29 /* KATAKANA_IROHA */; - case 'khmer': - return 30 /* KHMER */; - case 'korean-hangul-formal': - return 31 /* KOREAN_HANGUL_FORMAL */; - case 'korean-hanja-formal': - return 32 /* KOREAN_HANJA_FORMAL */; - case 'korean-hanja-informal': - return 33 /* KOREAN_HANJA_INFORMAL */; - case 'lao': - return 34 /* LAO */; - case 'lower-armenian': - return 35 /* LOWER_ARMENIAN */; - case 'malayalam': - return 36 /* MALAYALAM */; - case 'mongolian': - return 37 /* MONGOLIAN */; - case 'myanmar': - return 38 /* MYANMAR */; - case 'oriya': - return 39 /* ORIYA */; - case 'persian': - return 40 /* PERSIAN */; - case 'simp-chinese-formal': - return 41 /* SIMP_CHINESE_FORMAL */; - case 'simp-chinese-informal': - return 42 /* SIMP_CHINESE_INFORMAL */; - case 'tamil': - return 43 /* TAMIL */; - case 'telugu': - return 44 /* TELUGU */; - case 'thai': - return 45 /* THAI */; - case 'tibetan': - return 46 /* TIBETAN */; - case 'trad-chinese-formal': - return 47 /* TRAD_CHINESE_FORMAL */; - case 'trad-chinese-informal': - return 48 /* TRAD_CHINESE_INFORMAL */; - case 'upper-armenian': - return 49 /* UPPER_ARMENIAN */; - case 'disclosure-open': - return 50 /* DISCLOSURE_OPEN */; - case 'disclosure-closed': - return 51 /* DISCLOSURE_CLOSED */; - case 
'none': - default: - return -1 /* NONE */; - } - } - }; - - var marginForSide = function (side) { return ({ - name: "margin-" + side, - initialValue: '0', - prefix: false, - type: 4 /* TOKEN_VALUE */ - }); }; - var marginTop = marginForSide('top'); - var marginRight = marginForSide('right'); - var marginBottom = marginForSide('bottom'); - var marginLeft = marginForSide('left'); - - var overflow = { - name: 'overflow', - initialValue: 'visible', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).map(function (overflow) { - switch (overflow.value) { - case 'hidden': - return 1 /* HIDDEN */; - case 'scroll': - return 2 /* SCROLL */; - case 'clip': - return 3 /* CLIP */; - case 'auto': - return 4 /* AUTO */; - case 'visible': - default: - return 0 /* VISIBLE */; - } - }); - } - }; - - var overflowWrap = { - name: 'overflow-wrap', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, overflow) { - switch (overflow) { - case 'break-word': - return "break-word" /* BREAK_WORD */; - case 'normal': - default: - return "normal" /* NORMAL */; - } - } - }; - - var paddingForSide = function (side) { return ({ - name: "padding-" + side, - initialValue: '0', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'length-percentage' - }); }; - var paddingTop = paddingForSide('top'); - var paddingRight = paddingForSide('right'); - var paddingBottom = paddingForSide('bottom'); - var paddingLeft = paddingForSide('left'); - - var textAlign = { - name: 'text-align', - initialValue: 'left', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, textAlign) { - switch (textAlign) { - case 'right': - return 2 /* RIGHT */; - case 'center': - case 'justify': - return 1 /* CENTER */; - case 'left': - default: - return 0 /* LEFT */; - } - } - }; - - var position = { - name: 'position', - initialValue: 'static', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, position) { - switch (position) { - case 'relative': - return 1 /* RELATIVE */; - case 'absolute': - return 2 /* ABSOLUTE */; - case 'fixed': - return 3 /* FIXED */; - case 'sticky': - return 4 /* STICKY */; - } - return 0 /* STATIC */; - } - }; - - var textShadow = { - name: 'text-shadow', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (context, tokens) { - if (tokens.length === 1 && isIdentWithValue(tokens[0], 'none')) { - return []; - } - return parseFunctionArgs(tokens).map(function (values) { - var shadow = { - color: COLORS.TRANSPARENT, - offsetX: ZERO_LENGTH, - offsetY: ZERO_LENGTH, - blur: ZERO_LENGTH - }; - var c = 0; - for (var i = 0; i < values.length; i++) { - var token = values[i]; - if (isLength(token)) { - if (c === 0) { - shadow.offsetX = token; - } - else if (c === 1) { - shadow.offsetY = token; - } - else { - shadow.blur = token; - } - c++; - } - else { - shadow.color = color$1.parse(context, token); - } - } - return shadow; - }); - } - }; - - var textTransform = { - name: 'text-transform', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, textTransform) { - switch (textTransform) { - case 'uppercase': - return 2 /* UPPERCASE */; - case 'lowercase': - return 1 /* LOWERCASE */; - case 'capitalize': - return 3 /* CAPITALIZE */; - } - return 0 /* NONE */; - } - }; - - var transform$1 = { - name: 'transform', - initialValue: 'none', - prefix: true, - type: 0 /* VALUE */, - parse: function (_context, token) { - if 
(token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') { - return null; - } - if (token.type === 18 /* FUNCTION */) { - var transformFunction = SUPPORTED_TRANSFORM_FUNCTIONS[token.name]; - if (typeof transformFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported transform function \"" + token.name + "\""); - } - return transformFunction(token.values); - } - return null; - } - }; - var matrix = function (args) { - var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; }); - return values.length === 6 ? values : null; - }; - // doesn't support 3D transforms at the moment - var matrix3d = function (args) { - var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; }); - var a1 = values[0], b1 = values[1]; values[2]; values[3]; var a2 = values[4], b2 = values[5]; values[6]; values[7]; values[8]; values[9]; values[10]; values[11]; var a4 = values[12], b4 = values[13]; values[14]; values[15]; - return values.length === 16 ? [a1, b1, a2, b2, a4, b4] : null; - }; - var SUPPORTED_TRANSFORM_FUNCTIONS = { - matrix: matrix, - matrix3d: matrix3d - }; - - var DEFAULT_VALUE = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 50, - flags: FLAG_INTEGER - }; - var DEFAULT = [DEFAULT_VALUE, DEFAULT_VALUE]; - var transformOrigin = { - name: 'transform-origin', - initialValue: '50% 50%', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var origins = tokens.filter(isLengthPercentage); - if (origins.length !== 2) { - return DEFAULT; - } - return [origins[0], origins[1]]; - } - }; - - var visibility = { - name: 'visible', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, visibility) { - switch (visibility) { - case 'hidden': - return 1 /* HIDDEN */; - case 'collapse': - return 2 /* COLLAPSE */; - case 'visible': - default: - return 0 /* VISIBLE */; - } - } - }; - - var WORD_BREAK; - (function (WORD_BREAK) { - WORD_BREAK["NORMAL"] = "normal"; - WORD_BREAK["BREAK_ALL"] = "break-all"; - WORD_BREAK["KEEP_ALL"] = "keep-all"; - })(WORD_BREAK || (WORD_BREAK = {})); - var wordBreak = { - name: 'word-break', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, wordBreak) { - switch (wordBreak) { - case 'break-all': - return WORD_BREAK.BREAK_ALL; - case 'keep-all': - return WORD_BREAK.KEEP_ALL; - case 'normal': - default: - return WORD_BREAK.NORMAL; - } - } - }; - - var zIndex = { - name: 'z-index', - initialValue: 'auto', - prefix: false, - type: 0 /* VALUE */, - parse: function (_context, token) { - if (token.type === 20 /* IDENT_TOKEN */) { - return { auto: true, order: 0 }; - } - if (isNumberToken(token)) { - return { auto: false, order: token.number }; - } - throw new Error("Invalid z-index number parsed"); - } - }; - - var time = { - name: 'time', - parse: function (_context, value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - switch (value.unit.toLowerCase()) { - case 's': - return 1000 * value.number; - case 'ms': - return value.number; - } - } - throw new Error("Unsupported time type"); - } - }; - - var opacity = { - name: 'opacity', - initialValue: '1', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isNumberToken(token)) { - return token.number; - } - return 1; - } - }; - - var textDecorationColor = { - name: "text-decoration-color", - initialValue: 'transparent', - 
prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var textDecorationLine = { - name: 'text-decoration-line', - initialValue: 'none', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens - .filter(isIdentToken) - .map(function (token) { - switch (token.value) { - case 'underline': - return 1 /* UNDERLINE */; - case 'overline': - return 2 /* OVERLINE */; - case 'line-through': - return 3 /* LINE_THROUGH */; - case 'none': - return 4 /* BLINK */; - } - return 0 /* NONE */; - }) - .filter(function (line) { return line !== 0 /* NONE */; }); - } - }; - - var fontFamily = { - name: "font-family", - initialValue: '', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var accumulator = []; - var results = []; - tokens.forEach(function (token) { - switch (token.type) { - case 20 /* IDENT_TOKEN */: - case 0 /* STRING_TOKEN */: - accumulator.push(token.value); - break; - case 17 /* NUMBER_TOKEN */: - accumulator.push(token.number.toString()); - break; - case 4 /* COMMA_TOKEN */: - results.push(accumulator.join(' ')); - accumulator.length = 0; - break; - } - }); - if (accumulator.length) { - results.push(accumulator.join(' ')); - } - return results.map(function (result) { return (result.indexOf(' ') === -1 ? result : "'" + result + "'"); }); - } - }; - - var fontSize = { - name: "font-size", - initialValue: '0', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'length' - }; - - var fontWeight = { - name: 'font-weight', - initialValue: 'normal', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isNumberToken(token)) { - return token.number; - } - if (isIdentToken(token)) { - switch (token.value) { - case 'bold': - return 700; - case 'normal': - default: - return 400; - } - } - return 400; - } - }; - - var fontVariant = { - name: 'font-variant', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).map(function (token) { return token.value; }); - } - }; - - var fontStyle = { - name: 'font-style', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, overflow) { - switch (overflow) { - case 'oblique': - return "oblique" /* OBLIQUE */; - case 'italic': - return "italic" /* ITALIC */; - case 'normal': - default: - return "normal" /* NORMAL */; - } - } - }; - - var contains = function (bit, value) { return (bit & value) !== 0; }; - - var content = { - name: 'content', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return []; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return []; - } - return tokens; - } - }; - - var counterIncrement = { - name: 'counter-increment', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return null; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return null; - } - var increments = []; - var filtered = tokens.filter(nonWhiteSpace); - for (var i = 0; i < filtered.length; i++) { - var counter = filtered[i]; - var next = filtered[i + 1]; - if (counter.type === 20 /* IDENT_TOKEN */) { - var increment = next && isNumberToken(next) ? 
next.number : 1; - increments.push({ counter: counter.value, increment: increment }); - } - } - return increments; - } - }; - - var counterReset = { - name: 'counter-reset', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return []; - } - var resets = []; - var filtered = tokens.filter(nonWhiteSpace); - for (var i = 0; i < filtered.length; i++) { - var counter = filtered[i]; - var next = filtered[i + 1]; - if (isIdentToken(counter) && counter.value !== 'none') { - var reset = next && isNumberToken(next) ? next.number : 0; - resets.push({ counter: counter.value, reset: reset }); - } - } - return resets; - } - }; - - var duration = { - name: 'duration', - initialValue: '0s', - prefix: false, - type: 1 /* LIST */, - parse: function (context, tokens) { - return tokens.filter(isDimensionToken).map(function (token) { return time.parse(context, token); }); - } - }; - - var quotes = { - name: 'quotes', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return null; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return null; - } - var quotes = []; - var filtered = tokens.filter(isStringToken); - if (filtered.length % 2 !== 0) { - return null; - } - for (var i = 0; i < filtered.length; i += 2) { - var open_1 = filtered[i].value; - var close_1 = filtered[i + 1].value; - quotes.push({ open: open_1, close: close_1 }); - } - return quotes; - } - }; - var getQuote = function (quotes, depth, open) { - if (!quotes) { - return ''; - } - var quote = quotes[Math.min(depth, quotes.length - 1)]; - if (!quote) { - return ''; - } - return open ? quote.open : quote.close; - }; - - var paintOrder = { - name: 'paint-order', - initialValue: 'normal', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var DEFAULT_VALUE = [0 /* FILL */, 1 /* STROKE */, 2 /* MARKERS */]; - var layers = []; - tokens.filter(isIdentToken).forEach(function (token) { - switch (token.value) { - case 'stroke': - layers.push(1 /* STROKE */); - break; - case 'fill': - layers.push(0 /* FILL */); - break; - case 'markers': - layers.push(2 /* MARKERS */); - break; - } - }); - DEFAULT_VALUE.forEach(function (value) { - if (layers.indexOf(value) === -1) { - layers.push(value); - } - }); - return layers; - } - }; - - var webkitTextStrokeColor = { - name: "-webkit-text-stroke-color", - initialValue: 'currentcolor', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var webkitTextStrokeWidth = { - name: "-webkit-text-stroke-width", - initialValue: '0', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isDimensionToken(token)) { - return token.number; - } - return 0; - } - }; - - var CSSParsedDeclaration = /** @class */ (function () { - function CSSParsedDeclaration(context, declaration) { - var _a, _b; - this.animationDuration = parse(context, duration, declaration.animationDuration); - this.backgroundClip = parse(context, backgroundClip, declaration.backgroundClip); - this.backgroundColor = parse(context, backgroundColor, declaration.backgroundColor); - this.backgroundImage = parse(context, backgroundImage, declaration.backgroundImage); - this.backgroundOrigin = parse(context, backgroundOrigin, declaration.backgroundOrigin); - this.backgroundPosition = parse(context, backgroundPosition, declaration.backgroundPosition); - this.backgroundRepeat = 
parse(context, backgroundRepeat, declaration.backgroundRepeat); - this.backgroundSize = parse(context, backgroundSize, declaration.backgroundSize); - this.borderTopColor = parse(context, borderTopColor, declaration.borderTopColor); - this.borderRightColor = parse(context, borderRightColor, declaration.borderRightColor); - this.borderBottomColor = parse(context, borderBottomColor, declaration.borderBottomColor); - this.borderLeftColor = parse(context, borderLeftColor, declaration.borderLeftColor); - this.borderTopLeftRadius = parse(context, borderTopLeftRadius, declaration.borderTopLeftRadius); - this.borderTopRightRadius = parse(context, borderTopRightRadius, declaration.borderTopRightRadius); - this.borderBottomRightRadius = parse(context, borderBottomRightRadius, declaration.borderBottomRightRadius); - this.borderBottomLeftRadius = parse(context, borderBottomLeftRadius, declaration.borderBottomLeftRadius); - this.borderTopStyle = parse(context, borderTopStyle, declaration.borderTopStyle); - this.borderRightStyle = parse(context, borderRightStyle, declaration.borderRightStyle); - this.borderBottomStyle = parse(context, borderBottomStyle, declaration.borderBottomStyle); - this.borderLeftStyle = parse(context, borderLeftStyle, declaration.borderLeftStyle); - this.borderTopWidth = parse(context, borderTopWidth, declaration.borderTopWidth); - this.borderRightWidth = parse(context, borderRightWidth, declaration.borderRightWidth); - this.borderBottomWidth = parse(context, borderBottomWidth, declaration.borderBottomWidth); - this.borderLeftWidth = parse(context, borderLeftWidth, declaration.borderLeftWidth); - this.color = parse(context, color, declaration.color); - this.direction = parse(context, direction, declaration.direction); - this.display = parse(context, display, declaration.display); - this.float = parse(context, float, declaration.cssFloat); - this.fontFamily = parse(context, fontFamily, declaration.fontFamily); - this.fontSize = parse(context, fontSize, declaration.fontSize); - this.fontStyle = parse(context, fontStyle, declaration.fontStyle); - this.fontVariant = parse(context, fontVariant, declaration.fontVariant); - this.fontWeight = parse(context, fontWeight, declaration.fontWeight); - this.letterSpacing = parse(context, letterSpacing, declaration.letterSpacing); - this.lineBreak = parse(context, lineBreak, declaration.lineBreak); - this.lineHeight = parse(context, lineHeight, declaration.lineHeight); - this.listStyleImage = parse(context, listStyleImage, declaration.listStyleImage); - this.listStylePosition = parse(context, listStylePosition, declaration.listStylePosition); - this.listStyleType = parse(context, listStyleType, declaration.listStyleType); - this.marginTop = parse(context, marginTop, declaration.marginTop); - this.marginRight = parse(context, marginRight, declaration.marginRight); - this.marginBottom = parse(context, marginBottom, declaration.marginBottom); - this.marginLeft = parse(context, marginLeft, declaration.marginLeft); - this.opacity = parse(context, opacity, declaration.opacity); - var overflowTuple = parse(context, overflow, declaration.overflow); - this.overflowX = overflowTuple[0]; - this.overflowY = overflowTuple[overflowTuple.length > 1 ? 
1 : 0]; - this.overflowWrap = parse(context, overflowWrap, declaration.overflowWrap); - this.paddingTop = parse(context, paddingTop, declaration.paddingTop); - this.paddingRight = parse(context, paddingRight, declaration.paddingRight); - this.paddingBottom = parse(context, paddingBottom, declaration.paddingBottom); - this.paddingLeft = parse(context, paddingLeft, declaration.paddingLeft); - this.paintOrder = parse(context, paintOrder, declaration.paintOrder); - this.position = parse(context, position, declaration.position); - this.textAlign = parse(context, textAlign, declaration.textAlign); - this.textDecorationColor = parse(context, textDecorationColor, (_a = declaration.textDecorationColor) !== null && _a !== void 0 ? _a : declaration.color); - this.textDecorationLine = parse(context, textDecorationLine, (_b = declaration.textDecorationLine) !== null && _b !== void 0 ? _b : declaration.textDecoration); - this.textShadow = parse(context, textShadow, declaration.textShadow); - this.textTransform = parse(context, textTransform, declaration.textTransform); - this.transform = parse(context, transform$1, declaration.transform); - this.transformOrigin = parse(context, transformOrigin, declaration.transformOrigin); - this.visibility = parse(context, visibility, declaration.visibility); - this.webkitTextStrokeColor = parse(context, webkitTextStrokeColor, declaration.webkitTextStrokeColor); - this.webkitTextStrokeWidth = parse(context, webkitTextStrokeWidth, declaration.webkitTextStrokeWidth); - this.wordBreak = parse(context, wordBreak, declaration.wordBreak); - this.zIndex = parse(context, zIndex, declaration.zIndex); - } - CSSParsedDeclaration.prototype.isVisible = function () { - return this.display > 0 && this.opacity > 0 && this.visibility === 0 /* VISIBLE */; - }; - CSSParsedDeclaration.prototype.isTransparent = function () { - return isTransparent(this.backgroundColor); - }; - CSSParsedDeclaration.prototype.isTransformed = function () { - return this.transform !== null; - }; - CSSParsedDeclaration.prototype.isPositioned = function () { - return this.position !== 0 /* STATIC */; - }; - CSSParsedDeclaration.prototype.isPositionedWithZIndex = function () { - return this.isPositioned() && !this.zIndex.auto; - }; - CSSParsedDeclaration.prototype.isFloating = function () { - return this.float !== 0 /* NONE */; - }; - CSSParsedDeclaration.prototype.isInlineLevel = function () { - return (contains(this.display, 4 /* INLINE */) || - contains(this.display, 33554432 /* INLINE_BLOCK */) || - contains(this.display, 268435456 /* INLINE_FLEX */) || - contains(this.display, 536870912 /* INLINE_GRID */) || - contains(this.display, 67108864 /* INLINE_LIST_ITEM */) || - contains(this.display, 134217728 /* INLINE_TABLE */)); - }; - return CSSParsedDeclaration; - }()); - var CSSParsedPseudoDeclaration = /** @class */ (function () { - function CSSParsedPseudoDeclaration(context, declaration) { - this.content = parse(context, content, declaration.content); - this.quotes = parse(context, quotes, declaration.quotes); - } - return CSSParsedPseudoDeclaration; - }()); - var CSSParsedCounterDeclaration = /** @class */ (function () { - function CSSParsedCounterDeclaration(context, declaration) { - this.counterIncrement = parse(context, counterIncrement, declaration.counterIncrement); - this.counterReset = parse(context, counterReset, declaration.counterReset); - } - return CSSParsedCounterDeclaration; - }()); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var parse = function (context, descriptor, 
style) { - var tokenizer = new Tokenizer(); - var value = style !== null && typeof style !== 'undefined' ? style.toString() : descriptor.initialValue; - tokenizer.write(value); - var parser = new Parser(tokenizer.read()); - switch (descriptor.type) { - case 2 /* IDENT_VALUE */: - var token = parser.parseComponentValue(); - return descriptor.parse(context, isIdentToken(token) ? token.value : descriptor.initialValue); - case 0 /* VALUE */: - return descriptor.parse(context, parser.parseComponentValue()); - case 1 /* LIST */: - return descriptor.parse(context, parser.parseComponentValues()); - case 4 /* TOKEN_VALUE */: - return parser.parseComponentValue(); - case 3 /* TYPE_VALUE */: - switch (descriptor.format) { - case 'angle': - return angle.parse(context, parser.parseComponentValue()); - case 'color': - return color$1.parse(context, parser.parseComponentValue()); - case 'image': - return image.parse(context, parser.parseComponentValue()); - case 'length': - var length_1 = parser.parseComponentValue(); - return isLength(length_1) ? length_1 : ZERO_LENGTH; - case 'length-percentage': - var value_1 = parser.parseComponentValue(); - return isLengthPercentage(value_1) ? value_1 : ZERO_LENGTH; - case 'time': - return time.parse(context, parser.parseComponentValue()); - } - break; - } - }; - - var elementDebuggerAttribute = 'data-html2canvas-debug'; - var getElementDebugType = function (element) { - var attribute = element.getAttribute(elementDebuggerAttribute); - switch (attribute) { - case 'all': - return 1 /* ALL */; - case 'clone': - return 2 /* CLONE */; - case 'parse': - return 3 /* PARSE */; - case 'render': - return 4 /* RENDER */; - default: - return 0 /* NONE */; - } - }; - var isDebugging = function (element, type) { - var elementType = getElementDebugType(element); - return elementType === 1 /* ALL */ || type === elementType; - }; - - var ElementContainer = /** @class */ (function () { - function ElementContainer(context, element) { - this.context = context; - this.textNodes = []; - this.elements = []; - this.flags = 0; - if (isDebugging(element, 3 /* PARSE */)) { - debugger; - } - this.styles = new CSSParsedDeclaration(context, window.getComputedStyle(element, null)); - if (isHTMLElementNode(element)) { - if (this.styles.animationDuration.some(function (duration) { return duration > 0; })) { - element.style.animationDuration = '0s'; - } - if (this.styles.transform !== null) { - // getBoundingClientRect takes transforms into account - element.style.transform = 'none'; - } - } - this.bounds = parseBounds(this.context, element); - if (isDebugging(element, 4 /* RENDER */)) { - this.flags |= 16 /* DEBUG_RENDER */; - } - } - return ElementContainer; - }()); - - /* - * text-segmentation 1.0.3 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var base64 = 
'AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACA
AIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEAAjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAFoECAAIAAgACAAIA
AgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcABQAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAAAAAAAAAAAAAAAAA
AAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHAAcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAA
UABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAUABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA='; - - /* - * utrie 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256); - for (var i$1 = 0; i$1 < chars$1.length; i$1++) { - lookup$1[chars$1.charCodeAt(i$1)] = i$1; - } - var decode = function (base64) { - var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4; - if (base64[base64.length - 1] === '=') { - bufferLength--; - if (base64[base64.length - 2] === '=') { - bufferLength--; - } - } - var buffer = typeof ArrayBuffer !== 'undefined' && - typeof Uint8Array !== 'undefined' && - typeof Uint8Array.prototype.slice !== 'undefined' - ? new ArrayBuffer(bufferLength) - : new Array(bufferLength); - var bytes = Array.isArray(buffer) ? buffer : new Uint8Array(buffer); - for (i = 0; i < len; i += 4) { - encoded1 = lookup$1[base64.charCodeAt(i)]; - encoded2 = lookup$1[base64.charCodeAt(i + 1)]; - encoded3 = lookup$1[base64.charCodeAt(i + 2)]; - encoded4 = lookup$1[base64.charCodeAt(i + 3)]; - bytes[p++] = (encoded1 << 2) | (encoded2 >> 4); - bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2); - bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63); - } - return buffer; - }; - var polyUint16Array = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 2) { - bytes.push((buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - var polyUint32Array = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 4) { - bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - - /** Shift size for getting the index-2 table offset. */ - var UTRIE2_SHIFT_2 = 5; - /** Shift size for getting the index-1 table offset. */ - var UTRIE2_SHIFT_1 = 6 + 5; - /** - * Shift size for shifting left the index array values. - * Increases possible data size with 16-bit index values at the cost - * of compactability. - * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY. 
- */ - var UTRIE2_INDEX_SHIFT = 2; - /** - * Difference between the two shift sizes, - * for getting an index-1 offset from an index-2 offset. 6=11-5 - */ - var UTRIE2_SHIFT_1_2 = UTRIE2_SHIFT_1 - UTRIE2_SHIFT_2; - /** - * The part of the index-2 table for U+D800..U+DBFF stores values for - * lead surrogate code _units_ not code _points_. - * Values for lead surrogate code _points_ are indexed with this portion of the table. - * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.) - */ - var UTRIE2_LSCP_INDEX_2_OFFSET = 0x10000 >> UTRIE2_SHIFT_2; - /** Number of entries in a data block. 32=0x20 */ - var UTRIE2_DATA_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_2; - /** Mask for getting the lower bits for the in-data-block offset. */ - var UTRIE2_DATA_MASK = UTRIE2_DATA_BLOCK_LENGTH - 1; - var UTRIE2_LSCP_INDEX_2_LENGTH = 0x400 >> UTRIE2_SHIFT_2; - /** Count the lengths of both BMP pieces. 2080=0x820 */ - var UTRIE2_INDEX_2_BMP_LENGTH = UTRIE2_LSCP_INDEX_2_OFFSET + UTRIE2_LSCP_INDEX_2_LENGTH; - /** - * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820. - * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2. - */ - var UTRIE2_UTF8_2B_INDEX_2_OFFSET = UTRIE2_INDEX_2_BMP_LENGTH; - var UTRIE2_UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */ - /** - * The index-1 table, only used for supplementary code points, at offset 2112=0x840. - * Variable length, for code points up to highStart, where the last single-value range starts. - * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1. - * (For 0x100000 supplementary code points U+10000..U+10ffff.) - * - * The part of the index-2 table for supplementary code points starts - * after this index-1 table. - * - * Both the index-1 table and the following part of the index-2 table - * are omitted completely if there is only BMP data. - */ - var UTRIE2_INDEX_1_OFFSET = UTRIE2_UTF8_2B_INDEX_2_OFFSET + UTRIE2_UTF8_2B_INDEX_2_LENGTH; - /** - * Number of index-1 entries for the BMP. 32=0x20 - * This part of the index-1 table is omitted from the serialized form. - */ - var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> UTRIE2_SHIFT_1; - /** Number of entries in an index-2 block. 64=0x40 */ - var UTRIE2_INDEX_2_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_1_2; - /** Mask for getting the lower bits for the in-index-2-block offset. */ - var UTRIE2_INDEX_2_MASK = UTRIE2_INDEX_2_BLOCK_LENGTH - 1; - var slice16 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint16Array(Array.prototype.slice.call(view, start, end)); - }; - var slice32 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint32Array(Array.prototype.slice.call(view, start, end)); - }; - var createTrieFromBase64 = function (base64, _byteLength) { - var buffer = decode(base64); - var view32 = Array.isArray(buffer) ? polyUint32Array(buffer) : new Uint32Array(buffer); - var view16 = Array.isArray(buffer) ? polyUint16Array(buffer) : new Uint16Array(buffer); - var headerLength = 24; - var index = slice16(view16, headerLength / 2, view32[4] / 2); - var data = view32[5] === 2 - ? 
slice16(view16, (headerLength + view32[4]) / 2) - : slice32(view32, Math.ceil((headerLength + view32[4]) / 4)); - return new Trie(view32[0], view32[1], view32[2], view32[3], index, data); - }; - var Trie = /** @class */ (function () { - function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) { - this.initialValue = initialValue; - this.errorValue = errorValue; - this.highStart = highStart; - this.highValueIndex = highValueIndex; - this.index = index; - this.data = data; - } - /** - * Get the value for a code point as stored in the Trie. - * - * @param codePoint the code point - * @return the value - */ - Trie.prototype.get = function (codePoint) { - var ix; - if (codePoint >= 0) { - if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) { - // Ordinary BMP code point, excluding leading surrogates. - // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index. - // 16 bit data is stored in the index array itself. - ix = this.index[codePoint >> UTRIE2_SHIFT_2]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint <= 0xffff) { - // Lead Surrogate Code Point. A Separate index section is stored for - // lead surrogate code units and code points. - // The main index has the code unit data. - // For this function, we need the code point data. - // Note: this expression could be refactored for slightly improved efficiency, but - // surrogate code points will be so rare in practice that it's not worth it. - ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2)]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint < this.highStart) { - // Supplemental code point, use two-level lookup. - ix = UTRIE2_INDEX_1_OFFSET - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH + (codePoint >> UTRIE2_SHIFT_1); - ix = this.index[ix]; - ix += (codePoint >> UTRIE2_SHIFT_2) & UTRIE2_INDEX_2_MASK; - ix = this.index[ix]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint <= 0x10ffff) { - return this.data[this.highValueIndex]; - } - } - // Fall through. The code point is outside of the legal range of 0..0x10ffff. - return this.errorValue; - }; - return Trie; - }()); - - /* - * base64-arraybuffer 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup = typeof Uint8Array === 'undefined' ? 
[] : new Uint8Array(256); - for (var i = 0; i < chars.length; i++) { - lookup[chars.charCodeAt(i)] = i; - } - - var Prepend = 1; - var CR = 2; - var LF = 3; - var Control = 4; - var Extend = 5; - var SpacingMark = 7; - var L = 8; - var V = 9; - var T = 10; - var LV = 11; - var LVT = 12; - var ZWJ = 13; - var Extended_Pictographic = 14; - var RI = 15; - var toCodePoints = function (str) { - var codePoints = []; - var i = 0; - var length = str.length; - while (i < length) { - var value = str.charCodeAt(i++); - if (value >= 0xd800 && value <= 0xdbff && i < length) { - var extra = str.charCodeAt(i++); - if ((extra & 0xfc00) === 0xdc00) { - codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000); - } - else { - codePoints.push(value); - i--; - } - } - else { - codePoints.push(value); - } - } - return codePoints; - }; - var fromCodePoint = function () { - var codePoints = []; - for (var _i = 0; _i < arguments.length; _i++) { - codePoints[_i] = arguments[_i]; - } - if (String.fromCodePoint) { - return String.fromCodePoint.apply(String, codePoints); - } - var length = codePoints.length; - if (!length) { - return ''; - } - var codeUnits = []; - var index = -1; - var result = ''; - while (++index < length) { - var codePoint = codePoints[index]; - if (codePoint <= 0xffff) { - codeUnits.push(codePoint); - } - else { - codePoint -= 0x10000; - codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00); - } - if (index + 1 === length || codeUnits.length > 0x4000) { - result += String.fromCharCode.apply(String, codeUnits); - codeUnits.length = 0; - } - } - return result; - }; - var UnicodeTrie = createTrieFromBase64(base64); - var BREAK_NOT_ALLOWED = '×'; - var BREAK_ALLOWED = '÷'; - var codePointToClass = function (codePoint) { return UnicodeTrie.get(codePoint); }; - var _graphemeBreakAtIndex = function (_codePoints, classTypes, index) { - var prevIndex = index - 2; - var prev = classTypes[prevIndex]; - var current = classTypes[index - 1]; - var next = classTypes[index]; - // GB3 Do not break between a CR and LF - if (current === CR && next === LF) { - return BREAK_NOT_ALLOWED; - } - // GB4 Otherwise, break before and after controls. - if (current === CR || current === LF || current === Control) { - return BREAK_ALLOWED; - } - // GB5 - if (next === CR || next === LF || next === Control) { - return BREAK_ALLOWED; - } - // Do not break Hangul syllable sequences. - // GB6 - if (current === L && [L, V, LV, LVT].indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED; - } - // GB7 - if ((current === LV || current === V) && (next === V || next === T)) { - return BREAK_NOT_ALLOWED; - } - // GB8 - if ((current === LVT || current === T) && next === T) { - return BREAK_NOT_ALLOWED; - } - // GB9 Do not break before extending characters or ZWJ. - if (next === ZWJ || next === Extend) { - return BREAK_NOT_ALLOWED; - } - // Do not break before SpacingMarks, or after Prepend characters. - // GB9a - if (next === SpacingMark) { - return BREAK_NOT_ALLOWED; - } - // GB9a - if (current === Prepend) { - return BREAK_NOT_ALLOWED; - } - // GB11 Do not break within emoji modifier sequences or emoji zwj sequences. - if (current === ZWJ && next === Extended_Pictographic) { - while (prev === Extend) { - prev = classTypes[--prevIndex]; - } - if (prev === Extended_Pictographic) { - return BREAK_NOT_ALLOWED; - } - } - // GB12 Do not break within emoji flag sequences. - // That is, do not break between regional indicator (RI) symbols - // if there is an odd number of RI characters before the break point. 
- if (current === RI && next === RI) { - var countRI = 0; - while (prev === RI) { - countRI++; - prev = classTypes[--prevIndex]; - } - if (countRI % 2 === 0) { - return BREAK_NOT_ALLOWED; - } - } - return BREAK_ALLOWED; - }; - var GraphemeBreaker = function (str) { - var codePoints = toCodePoints(str); - var length = codePoints.length; - var index = 0; - var lastEnd = 0; - var classTypes = codePoints.map(codePointToClass); - return { - next: function () { - if (index >= length) { - return { done: true, value: null }; - } - var graphemeBreak = BREAK_NOT_ALLOWED; - while (index < length && - (graphemeBreak = _graphemeBreakAtIndex(codePoints, classTypes, ++index)) === BREAK_NOT_ALLOWED) { } - if (graphemeBreak !== BREAK_NOT_ALLOWED || index === length) { - var value = fromCodePoint.apply(null, codePoints.slice(lastEnd, index)); - lastEnd = index; - return { value: value, done: false }; - } - return { done: true, value: null }; - }, - }; - }; - var splitGraphemes = function (str) { - var breaker = GraphemeBreaker(str); - var graphemes = []; - var bk; - while (!(bk = breaker.next()).done) { - if (bk.value) { - graphemes.push(bk.value.slice()); - } - } - return graphemes; - }; - - var testRangeBounds = function (document) { - var TEST_HEIGHT = 123; - if (document.createRange) { - var range = document.createRange(); - if (range.getBoundingClientRect) { - var testElement = document.createElement('boundtest'); - testElement.style.height = TEST_HEIGHT + "px"; - testElement.style.display = 'block'; - document.body.appendChild(testElement); - range.selectNode(testElement); - var rangeBounds = range.getBoundingClientRect(); - var rangeHeight = Math.round(rangeBounds.height); - document.body.removeChild(testElement); - if (rangeHeight === TEST_HEIGHT) { - return true; - } - } - } - return false; - }; - var testIOSLineBreak = function (document) { - var testElement = document.createElement('boundtest'); - testElement.style.width = '50px'; - testElement.style.display = 'block'; - testElement.style.fontSize = '12px'; - testElement.style.letterSpacing = '0px'; - testElement.style.wordSpacing = '0px'; - document.body.appendChild(testElement); - var range = document.createRange(); - testElement.innerHTML = typeof ''.repeat === 'function' ? 
'👨'.repeat(10) : ''; - var node = testElement.firstChild; - var textList = toCodePoints$1(node.data).map(function (i) { return fromCodePoint$1(i); }); - var offset = 0; - var prev = {}; - // ios 13 does not handle range getBoundingClientRect line changes correctly #2177 - var supports = textList.every(function (text, i) { - range.setStart(node, offset); - range.setEnd(node, offset + text.length); - var rect = range.getBoundingClientRect(); - offset += text.length; - var boundAhead = rect.x > prev.x || rect.y > prev.y; - prev = rect; - if (i === 0) { - return true; - } - return boundAhead; - }); - document.body.removeChild(testElement); - return supports; - }; - var testCORS = function () { return typeof new Image().crossOrigin !== 'undefined'; }; - var testResponseType = function () { return typeof new XMLHttpRequest().responseType === 'string'; }; - var testSVG = function (document) { - var img = new Image(); - var canvas = document.createElement('canvas'); - var ctx = canvas.getContext('2d'); - if (!ctx) { - return false; - } - img.src = "data:image/svg+xml,"; - try { - ctx.drawImage(img, 0, 0); - canvas.toDataURL(); - } - catch (e) { - return false; - } - return true; - }; - var isGreenPixel = function (data) { - return data[0] === 0 && data[1] === 255 && data[2] === 0 && data[3] === 255; - }; - var testForeignObject = function (document) { - var canvas = document.createElement('canvas'); - var size = 100; - canvas.width = size; - canvas.height = size; - var ctx = canvas.getContext('2d'); - if (!ctx) { - return Promise.reject(false); - } - ctx.fillStyle = 'rgb(0, 255, 0)'; - ctx.fillRect(0, 0, size, size); - var img = new Image(); - var greenImageSrc = canvas.toDataURL(); - img.src = greenImageSrc; - var svg = createForeignObjectSVG(size, size, 0, 0, img); - ctx.fillStyle = 'red'; - ctx.fillRect(0, 0, size, size); - return loadSerializedSVG$1(svg) - .then(function (img) { - ctx.drawImage(img, 0, 0); - var data = ctx.getImageData(0, 0, size, size).data; - ctx.fillStyle = 'red'; - ctx.fillRect(0, 0, size, size); - var node = document.createElement('div'); - node.style.backgroundImage = "url(" + greenImageSrc + ")"; - node.style.height = size + "px"; - // Firefox 55 does not render inline tags - return isGreenPixel(data) - ? 
loadSerializedSVG$1(createForeignObjectSVG(size, size, 0, 0, node)) - : Promise.reject(false); - }) - .then(function (img) { - ctx.drawImage(img, 0, 0); - // Edge does not render background-images - return isGreenPixel(ctx.getImageData(0, 0, size, size).data); - }) - .catch(function () { return false; }); - }; - var createForeignObjectSVG = function (width, height, x, y, node) { - var xmlns = 'http://www.w3.org/2000/svg'; - var svg = document.createElementNS(xmlns, 'svg'); - var foreignObject = document.createElementNS(xmlns, 'foreignObject'); - svg.setAttributeNS(null, 'width', width.toString()); - svg.setAttributeNS(null, 'height', height.toString()); - foreignObject.setAttributeNS(null, 'width', '100%'); - foreignObject.setAttributeNS(null, 'height', '100%'); - foreignObject.setAttributeNS(null, 'x', x.toString()); - foreignObject.setAttributeNS(null, 'y', y.toString()); - foreignObject.setAttributeNS(null, 'externalResourcesRequired', 'true'); - svg.appendChild(foreignObject); - foreignObject.appendChild(node); - return svg; - }; - var loadSerializedSVG$1 = function (svg) { - return new Promise(function (resolve, reject) { - var img = new Image(); - img.onload = function () { return resolve(img); }; - img.onerror = reject; - img.src = "data:image/svg+xml;charset=utf-8," + encodeURIComponent(new XMLSerializer().serializeToString(svg)); - }); - }; - var FEATURES = { - get SUPPORT_RANGE_BOUNDS() { - var value = testRangeBounds(document); - Object.defineProperty(FEATURES, 'SUPPORT_RANGE_BOUNDS', { value: value }); - return value; - }, - get SUPPORT_WORD_BREAKING() { - var value = FEATURES.SUPPORT_RANGE_BOUNDS && testIOSLineBreak(document); - Object.defineProperty(FEATURES, 'SUPPORT_WORD_BREAKING', { value: value }); - return value; - }, - get SUPPORT_SVG_DRAWING() { - var value = testSVG(document); - Object.defineProperty(FEATURES, 'SUPPORT_SVG_DRAWING', { value: value }); - return value; - }, - get SUPPORT_FOREIGNOBJECT_DRAWING() { - var value = typeof Array.from === 'function' && typeof window.fetch === 'function' - ? 
testForeignObject(document) - : Promise.resolve(false); - Object.defineProperty(FEATURES, 'SUPPORT_FOREIGNOBJECT_DRAWING', { value: value }); - return value; - }, - get SUPPORT_CORS_IMAGES() { - var value = testCORS(); - Object.defineProperty(FEATURES, 'SUPPORT_CORS_IMAGES', { value: value }); - return value; - }, - get SUPPORT_RESPONSE_TYPE() { - var value = testResponseType(); - Object.defineProperty(FEATURES, 'SUPPORT_RESPONSE_TYPE', { value: value }); - return value; - }, - get SUPPORT_CORS_XHR() { - var value = 'withCredentials' in new XMLHttpRequest(); - Object.defineProperty(FEATURES, 'SUPPORT_CORS_XHR', { value: value }); - return value; - }, - get SUPPORT_NATIVE_TEXT_SEGMENTATION() { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var value = !!(typeof Intl !== 'undefined' && Intl.Segmenter); - Object.defineProperty(FEATURES, 'SUPPORT_NATIVE_TEXT_SEGMENTATION', { value: value }); - return value; - } - }; - - var TextBounds = /** @class */ (function () { - function TextBounds(text, bounds) { - this.text = text; - this.bounds = bounds; - } - return TextBounds; - }()); - var parseTextBounds = function (context, value, styles, node) { - var textList = breakText(value, styles); - var textBounds = []; - var offset = 0; - textList.forEach(function (text) { - if (styles.textDecorationLine.length || text.trim().length > 0) { - if (FEATURES.SUPPORT_RANGE_BOUNDS) { - var clientRects = createRange(node, offset, text.length).getClientRects(); - if (clientRects.length > 1) { - var subSegments = segmentGraphemes(text); - var subOffset_1 = 0; - subSegments.forEach(function (subSegment) { - textBounds.push(new TextBounds(subSegment, Bounds.fromDOMRectList(context, createRange(node, subOffset_1 + offset, subSegment.length).getClientRects()))); - subOffset_1 += subSegment.length; - }); - } - else { - textBounds.push(new TextBounds(text, Bounds.fromDOMRectList(context, clientRects))); - } - } - else { - var replacementNode = node.splitText(text.length); - textBounds.push(new TextBounds(text, getWrapperBounds(context, node))); - node = replacementNode; - } - } - else if (!FEATURES.SUPPORT_RANGE_BOUNDS) { - node = node.splitText(text.length); - } - offset += text.length; - }); - return textBounds; - }; - var getWrapperBounds = function (context, node) { - var ownerDocument = node.ownerDocument; - if (ownerDocument) { - var wrapper = ownerDocument.createElement('html2canvaswrapper'); - wrapper.appendChild(node.cloneNode(true)); - var parentNode = node.parentNode; - if (parentNode) { - parentNode.replaceChild(wrapper, node); - var bounds = parseBounds(context, wrapper); - if (wrapper.firstChild) { - parentNode.replaceChild(wrapper.firstChild, wrapper); - } - return bounds; - } - } - return Bounds.EMPTY; - }; - var createRange = function (node, offset, length) { - var ownerDocument = node.ownerDocument; - if (!ownerDocument) { - throw new Error('Node has no owner document'); - } - var range = ownerDocument.createRange(); - range.setStart(node, offset); - range.setEnd(node, offset + length); - return range; - }; - var segmentGraphemes = function (value) { - if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var segmenter = new Intl.Segmenter(void 0, { granularity: 'grapheme' }); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; }); - } - return splitGraphemes(value); - }; - var segmentWords = function (value, 
styles) { - if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var segmenter = new Intl.Segmenter(void 0, { - granularity: 'word' - }); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; }); - } - return breakWords(value, styles); - }; - var breakText = function (value, styles) { - return styles.letterSpacing !== 0 ? segmentGraphemes(value) : segmentWords(value, styles); - }; - // https://drafts.csswg.org/css-text/#word-separator - var wordSeparators = [0x0020, 0x00a0, 0x1361, 0x10100, 0x10101, 0x1039, 0x1091]; - var breakWords = function (str, styles) { - var breaker = LineBreaker(str, { - lineBreak: styles.lineBreak, - wordBreak: styles.overflowWrap === "break-word" /* BREAK_WORD */ ? 'break-word' : styles.wordBreak - }); - var words = []; - var bk; - var _loop_1 = function () { - if (bk.value) { - var value = bk.value.slice(); - var codePoints = toCodePoints$1(value); - var word_1 = ''; - codePoints.forEach(function (codePoint) { - if (wordSeparators.indexOf(codePoint) === -1) { - word_1 += fromCodePoint$1(codePoint); - } - else { - if (word_1.length) { - words.push(word_1); - } - words.push(fromCodePoint$1(codePoint)); - word_1 = ''; - } - }); - if (word_1.length) { - words.push(word_1); - } - } - }; - while (!(bk = breaker.next()).done) { - _loop_1(); - } - return words; - }; - - var TextContainer = /** @class */ (function () { - function TextContainer(context, node, styles) { - this.text = transform(node.data, styles.textTransform); - this.textBounds = parseTextBounds(context, this.text, styles, node); - } - return TextContainer; - }()); - var transform = function (text, transform) { - switch (transform) { - case 1 /* LOWERCASE */: - return text.toLowerCase(); - case 3 /* CAPITALIZE */: - return text.replace(CAPITALIZE, capitalize); - case 2 /* UPPERCASE */: - return text.toUpperCase(); - default: - return text; - } - }; - var CAPITALIZE = /(^|\s|:|-|\(|\))([a-z])/g; - var capitalize = function (m, p1, p2) { - if (m.length > 0) { - return p1 + p2.toUpperCase(); - } - return m; - }; - - var ImageElementContainer = /** @class */ (function (_super) { - __extends(ImageElementContainer, _super); - function ImageElementContainer(context, img) { - var _this = _super.call(this, context, img) || this; - _this.src = img.currentSrc || img.src; - _this.intrinsicWidth = img.naturalWidth; - _this.intrinsicHeight = img.naturalHeight; - _this.context.cache.addImage(_this.src); - return _this; - } - return ImageElementContainer; - }(ElementContainer)); - - var CanvasElementContainer = /** @class */ (function (_super) { - __extends(CanvasElementContainer, _super); - function CanvasElementContainer(context, canvas) { - var _this = _super.call(this, context, canvas) || this; - _this.canvas = canvas; - _this.intrinsicWidth = canvas.width; - _this.intrinsicHeight = canvas.height; - return _this; - } - return CanvasElementContainer; - }(ElementContainer)); - - var SVGElementContainer = /** @class */ (function (_super) { - __extends(SVGElementContainer, _super); - function SVGElementContainer(context, img) { - var _this = _super.call(this, context, img) || this; - var s = new XMLSerializer(); - var bounds = parseBounds(context, img); - img.setAttribute('width', bounds.width + "px"); - img.setAttribute('height', bounds.height + "px"); - _this.svg = "data:image/svg+xml," + encodeURIComponent(s.serializeToString(img)); - 
_this.intrinsicWidth = img.width.baseVal.value; - _this.intrinsicHeight = img.height.baseVal.value; - _this.context.cache.addImage(_this.svg); - return _this; - } - return SVGElementContainer; - }(ElementContainer)); - - var LIElementContainer = /** @class */ (function (_super) { - __extends(LIElementContainer, _super); - function LIElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.value = element.value; - return _this; - } - return LIElementContainer; - }(ElementContainer)); - - var OLElementContainer = /** @class */ (function (_super) { - __extends(OLElementContainer, _super); - function OLElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.start = element.start; - _this.reversed = typeof element.reversed === 'boolean' && element.reversed === true; - return _this; - } - return OLElementContainer; - }(ElementContainer)); - - var CHECKBOX_BORDER_RADIUS = [ - { - type: 15 /* DIMENSION_TOKEN */, - flags: 0, - unit: 'px', - number: 3 - } - ]; - var RADIO_BORDER_RADIUS = [ - { - type: 16 /* PERCENTAGE_TOKEN */, - flags: 0, - number: 50 - } - ]; - var reformatInputBounds = function (bounds) { - if (bounds.width > bounds.height) { - return new Bounds(bounds.left + (bounds.width - bounds.height) / 2, bounds.top, bounds.height, bounds.height); - } - else if (bounds.width < bounds.height) { - return new Bounds(bounds.left, bounds.top + (bounds.height - bounds.width) / 2, bounds.width, bounds.width); - } - return bounds; - }; - var getInputValue = function (node) { - var value = node.type === PASSWORD ? new Array(node.value.length + 1).join('\u2022') : node.value; - return value.length === 0 ? node.placeholder || '' : value; - }; - var CHECKBOX = 'checkbox'; - var RADIO = 'radio'; - var PASSWORD = 'password'; - var INPUT_COLOR = 0x2a2a2aff; - var InputElementContainer = /** @class */ (function (_super) { - __extends(InputElementContainer, _super); - function InputElementContainer(context, input) { - var _this = _super.call(this, context, input) || this; - _this.type = input.type.toLowerCase(); - _this.checked = input.checked; - _this.value = getInputValue(input); - if (_this.type === CHECKBOX || _this.type === RADIO) { - _this.styles.backgroundColor = 0xdededeff; - _this.styles.borderTopColor = - _this.styles.borderRightColor = - _this.styles.borderBottomColor = - _this.styles.borderLeftColor = - 0xa5a5a5ff; - _this.styles.borderTopWidth = - _this.styles.borderRightWidth = - _this.styles.borderBottomWidth = - _this.styles.borderLeftWidth = - 1; - _this.styles.borderTopStyle = - _this.styles.borderRightStyle = - _this.styles.borderBottomStyle = - _this.styles.borderLeftStyle = - 1 /* SOLID */; - _this.styles.backgroundClip = [0 /* BORDER_BOX */]; - _this.styles.backgroundOrigin = [0 /* BORDER_BOX */]; - _this.bounds = reformatInputBounds(_this.bounds); - } - switch (_this.type) { - case CHECKBOX: - _this.styles.borderTopRightRadius = - _this.styles.borderTopLeftRadius = - _this.styles.borderBottomRightRadius = - _this.styles.borderBottomLeftRadius = - CHECKBOX_BORDER_RADIUS; - break; - case RADIO: - _this.styles.borderTopRightRadius = - _this.styles.borderTopLeftRadius = - _this.styles.borderBottomRightRadius = - _this.styles.borderBottomLeftRadius = - RADIO_BORDER_RADIUS; - break; - } - return _this; - } - return InputElementContainer; - }(ElementContainer)); - - var SelectElementContainer = /** @class */ (function (_super) { - __extends(SelectElementContainer, _super); - function 
SelectElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - var option = element.options[element.selectedIndex || 0]; - _this.value = option ? option.text || '' : ''; - return _this; - } - return SelectElementContainer; - }(ElementContainer)); - - var TextareaElementContainer = /** @class */ (function (_super) { - __extends(TextareaElementContainer, _super); - function TextareaElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.value = element.value; - return _this; - } - return TextareaElementContainer; - }(ElementContainer)); - - var IFrameElementContainer = /** @class */ (function (_super) { - __extends(IFrameElementContainer, _super); - function IFrameElementContainer(context, iframe) { - var _this = _super.call(this, context, iframe) || this; - _this.src = iframe.src; - _this.width = parseInt(iframe.width, 10) || 0; - _this.height = parseInt(iframe.height, 10) || 0; - _this.backgroundColor = _this.styles.backgroundColor; - try { - if (iframe.contentWindow && - iframe.contentWindow.document && - iframe.contentWindow.document.documentElement) { - _this.tree = parseTree(context, iframe.contentWindow.document.documentElement); - // http://www.w3.org/TR/css3-background/#special-backgrounds - var documentBackgroundColor = iframe.contentWindow.document.documentElement - ? parseColor(context, getComputedStyle(iframe.contentWindow.document.documentElement).backgroundColor) - : COLORS.TRANSPARENT; - var bodyBackgroundColor = iframe.contentWindow.document.body - ? parseColor(context, getComputedStyle(iframe.contentWindow.document.body).backgroundColor) - : COLORS.TRANSPARENT; - _this.backgroundColor = isTransparent(documentBackgroundColor) - ? isTransparent(bodyBackgroundColor) - ? 
_this.styles.backgroundColor - : bodyBackgroundColor - : documentBackgroundColor; - } - } - catch (e) { } - return _this; - } - return IFrameElementContainer; - }(ElementContainer)); - - var LIST_OWNERS = ['OL', 'UL', 'MENU']; - var parseNodeTree = function (context, node, parent, root) { - for (var childNode = node.firstChild, nextNode = void 0; childNode; childNode = nextNode) { - nextNode = childNode.nextSibling; - if (isTextNode(childNode) && childNode.data.trim().length > 0) { - parent.textNodes.push(new TextContainer(context, childNode, parent.styles)); - } - else if (isElementNode(childNode)) { - if (isSlotElement(childNode) && childNode.assignedNodes) { - childNode.assignedNodes().forEach(function (childNode) { return parseNodeTree(context, childNode, parent, root); }); - } - else { - var container = createContainer(context, childNode); - if (container.styles.isVisible()) { - if (createsRealStackingContext(childNode, container, root)) { - container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */; - } - else if (createsStackingContext(container.styles)) { - container.flags |= 2 /* CREATES_STACKING_CONTEXT */; - } - if (LIST_OWNERS.indexOf(childNode.tagName) !== -1) { - container.flags |= 8 /* IS_LIST_OWNER */; - } - parent.elements.push(container); - childNode.slot; - if (childNode.shadowRoot) { - parseNodeTree(context, childNode.shadowRoot, container, root); - } - else if (!isTextareaElement(childNode) && - !isSVGElement(childNode) && - !isSelectElement(childNode)) { - parseNodeTree(context, childNode, container, root); - } - } - } - } - } - }; - var createContainer = function (context, element) { - if (isImageElement(element)) { - return new ImageElementContainer(context, element); - } - if (isCanvasElement(element)) { - return new CanvasElementContainer(context, element); - } - if (isSVGElement(element)) { - return new SVGElementContainer(context, element); - } - if (isLIElement(element)) { - return new LIElementContainer(context, element); - } - if (isOLElement(element)) { - return new OLElementContainer(context, element); - } - if (isInputElement(element)) { - return new InputElementContainer(context, element); - } - if (isSelectElement(element)) { - return new SelectElementContainer(context, element); - } - if (isTextareaElement(element)) { - return new TextareaElementContainer(context, element); - } - if (isIFrameElement(element)) { - return new IFrameElementContainer(context, element); - } - return new ElementContainer(context, element); - }; - var parseTree = function (context, element) { - var container = createContainer(context, element); - container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */; - parseNodeTree(context, element, container, container); - return container; - }; - var createsRealStackingContext = function (node, container, root) { - return (container.styles.isPositionedWithZIndex() || - container.styles.opacity < 1 || - container.styles.isTransformed() || - (isBodyElement(node) && root.styles.isTransparent())); - }; - var createsStackingContext = function (styles) { return styles.isPositioned() || styles.isFloating(); }; - var isTextNode = function (node) { return node.nodeType === Node.TEXT_NODE; }; - var isElementNode = function (node) { return node.nodeType === Node.ELEMENT_NODE; }; - var isHTMLElementNode = function (node) { - return isElementNode(node) && typeof node.style !== 'undefined' && !isSVGElementNode(node); - }; - var isSVGElementNode = function (element) { - return typeof element.className === 'object'; - }; - var isLIElement = function 
(node) { return node.tagName === 'LI'; }; - var isOLElement = function (node) { return node.tagName === 'OL'; }; - var isInputElement = function (node) { return node.tagName === 'INPUT'; }; - var isHTMLElement = function (node) { return node.tagName === 'HTML'; }; - var isSVGElement = function (node) { return node.tagName === 'svg'; }; - var isBodyElement = function (node) { return node.tagName === 'BODY'; }; - var isCanvasElement = function (node) { return node.tagName === 'CANVAS'; }; - var isVideoElement = function (node) { return node.tagName === 'VIDEO'; }; - var isImageElement = function (node) { return node.tagName === 'IMG'; }; - var isIFrameElement = function (node) { return node.tagName === 'IFRAME'; }; - var isStyleElement = function (node) { return node.tagName === 'STYLE'; }; - var isScriptElement = function (node) { return node.tagName === 'SCRIPT'; }; - var isTextareaElement = function (node) { return node.tagName === 'TEXTAREA'; }; - var isSelectElement = function (node) { return node.tagName === 'SELECT'; }; - var isSlotElement = function (node) { return node.tagName === 'SLOT'; }; - // https://html.spec.whatwg.org/multipage/custom-elements.html#valid-custom-element-name - var isCustomElement = function (node) { return node.tagName.indexOf('-') > 0; }; - - var CounterState = /** @class */ (function () { - function CounterState() { - this.counters = {}; - } - CounterState.prototype.getCounterValue = function (name) { - var counter = this.counters[name]; - if (counter && counter.length) { - return counter[counter.length - 1]; - } - return 1; - }; - CounterState.prototype.getCounterValues = function (name) { - var counter = this.counters[name]; - return counter ? counter : []; - }; - CounterState.prototype.pop = function (counters) { - var _this = this; - counters.forEach(function (counter) { return _this.counters[counter].pop(); }); - }; - CounterState.prototype.parse = function (style) { - var _this = this; - var counterIncrement = style.counterIncrement; - var counterReset = style.counterReset; - var canReset = true; - if (counterIncrement !== null) { - counterIncrement.forEach(function (entry) { - var counter = _this.counters[entry.counter]; - if (counter && entry.increment !== 0) { - canReset = false; - if (!counter.length) { - counter.push(1); - } - counter[Math.max(0, counter.length - 1)] += entry.increment; - } - }); - } - var counterNames = []; - if (canReset) { - counterReset.forEach(function (entry) { - var counter = _this.counters[entry.counter]; - counterNames.push(entry.counter); - if (!counter) { - counter = _this.counters[entry.counter] = []; - } - counter.push(entry.reset); - }); - } - return counterNames; - }; - return CounterState; - }()); - var ROMAN_UPPER = { - integers: [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1], - values: ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'] - }; - var ARMENIAN = { - integers: [ - 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90, 80, 70, - 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - ], - values: [ - 'Ք', - 'Փ', - 'Ւ', - 'Ց', - 'Ր', - 'Տ', - 'Վ', - 'Ս', - 'Ռ', - 'Ջ', - 'Պ', - 'Չ', - 'Ո', - 'Շ', - 'Ն', - 'Յ', - 'Մ', - 'Ճ', - 'Ղ', - 'Ձ', - 'Հ', - 'Կ', - 'Ծ', - 'Խ', - 'Լ', - 'Ի', - 'Ժ', - 'Թ', - 'Ը', - 'Է', - 'Զ', - 'Ե', - 'Դ', - 'Գ', - 'Բ', - 'Ա' - ] - }; - var HEBREW = { - integers: [ - 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 400, 300, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, - 19, 18, 17, 16, 15, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1 - ], - values: [ - 'י׳', - 'ט׳', - 'ח׳', - 'ז׳', - 'ו׳', - 'ה׳', - 'ד׳', - 'ג׳', - 'ב׳', - 'א׳', - 'ת', - 'ש', - 'ר', - 'ק', - 'צ', - 'פ', - 'ע', - 'ס', - 'נ', - 'מ', - 'ל', - 'כ', - 'יט', - 'יח', - 'יז', - 'טז', - 'טו', - 'י', - 'ט', - 'ח', - 'ז', - 'ו', - 'ה', - 'ד', - 'ג', - 'ב', - 'א' - ] - }; - var GEORGIAN = { - integers: [ - 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90, - 80, 70, 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - ], - values: [ - 'ჵ', - 'ჰ', - 'ჯ', - 'ჴ', - 'ხ', - 'ჭ', - 'წ', - 'ძ', - 'ც', - 'ჩ', - 'შ', - 'ყ', - 'ღ', - 'ქ', - 'ფ', - 'ჳ', - 'ტ', - 'ს', - 'რ', - 'ჟ', - 'პ', - 'ო', - 'ჲ', - 'ნ', - 'მ', - 'ლ', - 'კ', - 'ი', - 'თ', - 'ჱ', - 'ზ', - 'ვ', - 'ე', - 'დ', - 'გ', - 'ბ', - 'ა' - ] - }; - var createAdditiveCounter = function (value, min, max, symbols, fallback, suffix) { - if (value < min || value > max) { - return createCounterText(value, fallback, suffix.length > 0); - } - return (symbols.integers.reduce(function (string, integer, index) { - while (value >= integer) { - value -= integer; - string += symbols.values[index]; - } - return string; - }, '') + suffix); - }; - var createCounterStyleWithSymbolResolver = function (value, codePointRangeLength, isNumeric, resolver) { - var string = ''; - do { - if (!isNumeric) { - value--; - } - string = resolver(value) + string; - value /= codePointRangeLength; - } while (value * codePointRangeLength >= codePointRangeLength); - return string; - }; - var createCounterStyleFromRange = function (value, codePointRangeStart, codePointRangeEnd, isNumeric, suffix) { - var codePointRangeLength = codePointRangeEnd - codePointRangeStart + 1; - return ((value < 0 ? '-' : '') + - (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, isNumeric, function (codePoint) { - return fromCodePoint$1(Math.floor(codePoint % codePointRangeLength) + codePointRangeStart); - }) + - suffix)); - }; - var createCounterStyleFromSymbols = function (value, symbols, suffix) { - if (suffix === void 0) { suffix = '. '; } - var codePointRangeLength = symbols.length; - return (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, false, function (codePoint) { return symbols[Math.floor(codePoint % codePointRangeLength)]; }) + suffix); - }; - var CJK_ZEROS = 1 << 0; - var CJK_TEN_COEFFICIENTS = 1 << 1; - var CJK_TEN_HIGH_COEFFICIENTS = 1 << 2; - var CJK_HUNDRED_COEFFICIENTS = 1 << 3; - var createCJKCounter = function (value, numbers, multipliers, negativeSign, suffix, flags) { - if (value < -9999 || value > 9999) { - return createCounterText(value, 4 /* CJK_DECIMAL */, suffix.length > 0); - } - var tmp = Math.abs(value); - var string = suffix; - if (tmp === 0) { - return numbers[0] + string; - } - for (var digit = 0; tmp > 0 && digit <= 4; digit++) { - var coefficient = tmp % 10; - if (coefficient === 0 && contains(flags, CJK_ZEROS) && string !== '') { - string = numbers[coefficient] + string; - } - else if (coefficient > 1 || - (coefficient === 1 && digit === 0) || - (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_COEFFICIENTS)) || - (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_HIGH_COEFFICIENTS) && value > 100) || - (coefficient === 1 && digit > 1 && contains(flags, CJK_HUNDRED_COEFFICIENTS))) { - string = numbers[coefficient] + (digit > 0 ? 
multipliers[digit - 1] : '') + string; - } - else if (coefficient === 1 && digit > 0) { - string = multipliers[digit - 1] + string; - } - tmp = Math.floor(tmp / 10); - } - return (value < 0 ? negativeSign : '') + string; - }; - var CHINESE_INFORMAL_MULTIPLIERS = '十百千萬'; - var CHINESE_FORMAL_MULTIPLIERS = '拾佰仟萬'; - var JAPANESE_NEGATIVE = 'マイナス'; - var KOREAN_NEGATIVE = '마이너스'; - var createCounterText = function (value, type, appendSuffix) { - var defaultSuffix = appendSuffix ? '. ' : ''; - var cjkSuffix = appendSuffix ? '、' : ''; - var koreanSuffix = appendSuffix ? ', ' : ''; - var spaceSuffix = appendSuffix ? ' ' : ''; - switch (type) { - case 0 /* DISC */: - return '•' + spaceSuffix; - case 1 /* CIRCLE */: - return '◦' + spaceSuffix; - case 2 /* SQUARE */: - return '◾' + spaceSuffix; - case 5 /* DECIMAL_LEADING_ZERO */: - var string = createCounterStyleFromRange(value, 48, 57, true, defaultSuffix); - return string.length < 4 ? "0" + string : string; - case 4 /* CJK_DECIMAL */: - return createCounterStyleFromSymbols(value, '〇一二三四五六七八九', cjkSuffix); - case 6 /* LOWER_ROMAN */: - return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix).toLowerCase(); - case 7 /* UPPER_ROMAN */: - return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix); - case 8 /* LOWER_GREEK */: - return createCounterStyleFromRange(value, 945, 969, false, defaultSuffix); - case 9 /* LOWER_ALPHA */: - return createCounterStyleFromRange(value, 97, 122, false, defaultSuffix); - case 10 /* UPPER_ALPHA */: - return createCounterStyleFromRange(value, 65, 90, false, defaultSuffix); - case 11 /* ARABIC_INDIC */: - return createCounterStyleFromRange(value, 1632, 1641, true, defaultSuffix); - case 12 /* ARMENIAN */: - case 49 /* UPPER_ARMENIAN */: - return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix); - case 35 /* LOWER_ARMENIAN */: - return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix).toLowerCase(); - case 13 /* BENGALI */: - return createCounterStyleFromRange(value, 2534, 2543, true, defaultSuffix); - case 14 /* CAMBODIAN */: - case 30 /* KHMER */: - return createCounterStyleFromRange(value, 6112, 6121, true, defaultSuffix); - case 15 /* CJK_EARTHLY_BRANCH */: - return createCounterStyleFromSymbols(value, '子丑寅卯辰巳午未申酉戌亥', cjkSuffix); - case 16 /* CJK_HEAVENLY_STEM */: - return createCounterStyleFromSymbols(value, '甲乙丙丁戊己庚辛壬癸', cjkSuffix); - case 17 /* CJK_IDEOGRAPHIC */: - case 48 /* TRAD_CHINESE_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', CHINESE_INFORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 47 /* TRAD_CHINESE_FORMAL */: - return createCJKCounter(value, '零壹貳參肆伍陸柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 42 /* SIMP_CHINESE_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', CHINESE_INFORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 41 /* SIMP_CHINESE_FORMAL */: - return createCJKCounter(value, '零壹贰叁肆伍陆柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 26 /* JAPANESE_INFORMAL */: - return createCJKCounter(value, '〇一二三四五六七八九', '十百千万', JAPANESE_NEGATIVE, cjkSuffix, 0); - case 25 /* JAPANESE_FORMAL */: - return createCJKCounter(value, 
'零壱弐参四伍六七八九', '拾百千万', JAPANESE_NEGATIVE, cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 31 /* KOREAN_HANGUL_FORMAL */: - return createCJKCounter(value, '영일이삼사오육칠팔구', '십백천만', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 33 /* KOREAN_HANJA_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', '十百千萬', KOREAN_NEGATIVE, koreanSuffix, 0); - case 32 /* KOREAN_HANJA_FORMAL */: - return createCJKCounter(value, '零壹貳參四五六七八九', '拾百千', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 18 /* DEVANAGARI */: - return createCounterStyleFromRange(value, 0x966, 0x96f, true, defaultSuffix); - case 20 /* GEORGIAN */: - return createAdditiveCounter(value, 1, 19999, GEORGIAN, 3 /* DECIMAL */, defaultSuffix); - case 21 /* GUJARATI */: - return createCounterStyleFromRange(value, 0xae6, 0xaef, true, defaultSuffix); - case 22 /* GURMUKHI */: - return createCounterStyleFromRange(value, 0xa66, 0xa6f, true, defaultSuffix); - case 22 /* HEBREW */: - return createAdditiveCounter(value, 1, 10999, HEBREW, 3 /* DECIMAL */, defaultSuffix); - case 23 /* HIRAGANA */: - return createCounterStyleFromSymbols(value, 'あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわゐゑをん'); - case 24 /* HIRAGANA_IROHA */: - return createCounterStyleFromSymbols(value, 'いろはにほへとちりぬるをわかよたれそつねならむうゐのおくやまけふこえてあさきゆめみしゑひもせす'); - case 27 /* KANNADA */: - return createCounterStyleFromRange(value, 0xce6, 0xcef, true, defaultSuffix); - case 28 /* KATAKANA */: - return createCounterStyleFromSymbols(value, 'アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヰヱヲン', cjkSuffix); - case 29 /* KATAKANA_IROHA */: - return createCounterStyleFromSymbols(value, 'イロハニホヘトチリヌルヲワカヨタレソツネナラムウヰノオクヤマケフコエテアサキユメミシヱヒモセス', cjkSuffix); - case 34 /* LAO */: - return createCounterStyleFromRange(value, 0xed0, 0xed9, true, defaultSuffix); - case 37 /* MONGOLIAN */: - return createCounterStyleFromRange(value, 0x1810, 0x1819, true, defaultSuffix); - case 38 /* MYANMAR */: - return createCounterStyleFromRange(value, 0x1040, 0x1049, true, defaultSuffix); - case 39 /* ORIYA */: - return createCounterStyleFromRange(value, 0xb66, 0xb6f, true, defaultSuffix); - case 40 /* PERSIAN */: - return createCounterStyleFromRange(value, 0x6f0, 0x6f9, true, defaultSuffix); - case 43 /* TAMIL */: - return createCounterStyleFromRange(value, 0xbe6, 0xbef, true, defaultSuffix); - case 44 /* TELUGU */: - return createCounterStyleFromRange(value, 0xc66, 0xc6f, true, defaultSuffix); - case 45 /* THAI */: - return createCounterStyleFromRange(value, 0xe50, 0xe59, true, defaultSuffix); - case 46 /* TIBETAN */: - return createCounterStyleFromRange(value, 0xf20, 0xf29, true, defaultSuffix); - case 3 /* DECIMAL */: - default: - return createCounterStyleFromRange(value, 48, 57, true, defaultSuffix); - } - }; - - var IGNORE_ATTRIBUTE = 'data-html2canvas-ignore'; - var DocumentCloner = /** @class */ (function () { - function DocumentCloner(context, element, options) { - this.context = context; - this.options = options; - this.scrolledElements = []; - this.referenceElement = element; - this.counters = new CounterState(); - this.quoteDepth = 0; - if (!element.ownerDocument) { - throw new Error('Cloned element does not have an owner document'); - } - this.documentElement = this.cloneNode(element.ownerDocument.documentElement, false); - } - DocumentCloner.prototype.toIFrame = function (ownerDocument, windowSize) { - var _this = this; - var iframe = createIFrameContainer(ownerDocument, 
windowSize); - if (!iframe.contentWindow) { - return Promise.reject("Unable to find iframe window"); - } - var scrollX = ownerDocument.defaultView.pageXOffset; - var scrollY = ownerDocument.defaultView.pageYOffset; - var cloneWindow = iframe.contentWindow; - var documentClone = cloneWindow.document; - /* Chrome doesn't detect relative background-images assigned in inline - ''' \ No newline at end of file diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py deleted file mode 100644 index 7030bd5a1d57647061064aa91c734e2f496e9b83..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/demo_toolbox.py +++ /dev/null @@ -1,49 +0,0 @@ -from pathlib import Path -from toolbox import Toolbox -from utils.argutils import print_args -from utils.modelutils import check_model_paths -import argparse -import os - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="Runs the toolbox", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument("-d", "--datasets_root", type=Path, help= \ - "Path to the directory containing your datasets. See toolbox/__init__.py for a list of " - "supported datasets.", default=None) - parser.add_argument("-vc", "--vc_mode", action="store_true", - help="Voice Conversion Mode(PPG based)") - parser.add_argument("-e", "--enc_models_dir", type=Path, default="encoder/saved_models", - help="Directory containing saved encoder models") - parser.add_argument("-s", "--syn_models_dir", type=Path, default="synthesizer/saved_models", - help="Directory containing saved synthesizer models") - parser.add_argument("-v", "--voc_models_dir", type=Path, default="vocoder/saved_models", - help="Directory containing saved vocoder models") - parser.add_argument("-ex", "--extractor_models_dir", type=Path, default="ppg_extractor/saved_models", - help="Directory containing saved extrator models") - parser.add_argument("-cv", "--convertor_models_dir", type=Path, default="ppg2mel/saved_models", - help="Directory containing saved convert models") - parser.add_argument("--cpu", action="store_true", help=\ - "If True, processing is done on CPU, even when a GPU is available.") - parser.add_argument("--seed", type=int, default=None, help=\ - "Optional random number seed value to make toolbox deterministic.") - parser.add_argument("--no_mp3_support", action="store_true", help=\ - "If True, no mp3 files are allowed.") - args = parser.parse_args() - print_args(args, parser) - - if args.cpu: - # Hide GPUs from Pytorch to force CPU processing - os.environ["CUDA_VISIBLE_DEVICES"] = "" - del args.cpu - - ## Remind the user to download pretrained models if needed - check_model_paths(encoder_path=args.enc_models_dir, synthesizer_path=args.syn_models_dir, - vocoder_path=args.voc_models_dir) - - # Launch the toolbox - Toolbox(**vars(args)) diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py b/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py deleted file mode 100644 index 3e356472fb5a27f370cb3920976a11d12a76c1b7..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/encoder/params_model.py +++ /dev/null @@ -1,11 +0,0 @@ - -## Model parameters -model_hidden_size = 256 -model_embedding_size = 256 -model_num_layers = 3 - - -## Training parameters -learning_rate_init = 1e-4 -speakers_per_batch = 64 -utterances_per_speaker = 10 diff --git 
a/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py b/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py deleted file mode 100644 index 8c26f1b37dae854f51da938da2fa67a8ef48ce5a..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/openpose/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" - -import torch -import numpy as np -from . import util -from .body import Body -from .hand import Hand -from annotator.util import annotator_ckpts_path - - -body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth" -hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth" - - -class OpenposeDetector: - def __init__(self): - body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth") - hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth") - - if not os.path.exists(hand_modelpath): - from basicsr.utils.download_util import load_file_from_url - load_file_from_url(body_model_path, model_dir=annotator_ckpts_path) - load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path) - - self.body_estimation = Body(body_modelpath) - self.hand_estimation = Hand(hand_modelpath) - - def __call__(self, oriImg, hand=False): - oriImg = oriImg[:, :, ::-1].copy() - with torch.no_grad(): - candidate, subset = self.body_estimation(oriImg) - canvas = np.zeros_like(oriImg) - canvas = util.draw_bodypose(canvas, candidate, subset) - if hand: - hands_list = util.handDetect(candidate, subset, oriImg) - all_hand_peaks = [] - for x, y, w, is_left in hands_list: - peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :]) - peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x) - peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y) - all_hand_peaks.append(peaks) - canvas = util.draw_handpose(canvas, all_hand_peaks) - return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist()) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py deleted file mode 100644 index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/utils/ext_loader.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import importlib -import os -import pkgutil -import warnings -from collections import namedtuple - -import torch - -if torch.__version__ != 'parrots': - - def load_ext(name, funcs): - ext = importlib.import_module('mmcv.' 
+ name) - for fun in funcs: - assert hasattr(ext, fun), f'{fun} miss in module {name}' - return ext -else: - from parrots import extension - from parrots.base import ParrotsException - - has_return_value_ops = [ - 'nms', - 'softnms', - 'nms_match', - 'nms_rotated', - 'top_pool_forward', - 'top_pool_backward', - 'bottom_pool_forward', - 'bottom_pool_backward', - 'left_pool_forward', - 'left_pool_backward', - 'right_pool_forward', - 'right_pool_backward', - 'fused_bias_leakyrelu', - 'upfirdn2d', - 'ms_deform_attn_forward', - 'pixel_group', - 'contour_expand', - ] - - def get_fake_func(name, e): - - def fake_func(*args, **kwargs): - warnings.warn(f'{name} is not supported in parrots now') - raise e - - return fake_func - - def load_ext(name, funcs): - ExtModule = namedtuple('ExtModule', funcs) - ext_list = [] - lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - for fun in funcs: - try: - ext_fun = extension.load(fun, name, lib_dir=lib_root) - except ParrotsException as e: - if 'No element registered' not in e.message: - warnings.warn(e.message) - ext_fun = get_fake_func(fun, e) - ext_list.append(ext_fun) - else: - if fun in has_return_value_ops: - ext_list.append(ext_fun.op) - else: - ext_list.append(ext_fun.op_) - return ExtModule(*ext_list) - - -def check_ops_exist(): - ext_loader = pkgutil.find_loader('mmcv._ext') - return ext_loader is not None diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py deleted file mode 100644 index f2678b321c295bcceaef945111ac3524be19d6e4..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/core/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .misc import add_prefix - -__all__ = ['add_prefix'] diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py deleted file mode 100644 index bc42e00500c7a5b70b2cef83b03e45b5bb471ff8..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/visualizers/directory.py +++ /dev/null @@ -1,36 +0,0 @@ -import os - -import cv2 -import numpy as np - -from saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch -from saicinpainting.utils import check_and_warn_input_range - - -class DirectoryVisualizer(BaseVisualizer): - DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ') - - def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10, - last_without_mask=True, rescale_keys=None): - self.outdir = outdir - os.makedirs(self.outdir, exist_ok=True) - self.key_order = key_order - self.max_items_in_batch = max_items_in_batch - self.last_without_mask = last_without_mask - self.rescale_keys = rescale_keys - - def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): - check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image') - vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch, - last_without_mask=self.last_without_mask, - rescale_keys=self.rescale_keys) - - vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8') - - curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}') - os.makedirs(curoutdir, exist_ok=True) - rank_suffix = f'_r{rank}' if rank is not None else '' - out_fname = 
os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg') - - vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR) - cv2.imwrite(out_fname, vis_img) diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py b/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py deleted file mode 100644 index 515d272734e4d10d346461965099a86e53f58701..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/VToonify/vtoonify/model/simple_augment.py +++ /dev/null @@ -1,468 +0,0 @@ -# almost the same as model.stylegan.non_leaking -# we only modify the parameters in sample_affine() to make the transformations mild - -import math - -import torch -from torch import autograd -from torch.nn import functional as F -import numpy as np - -from model.stylegan.distributed import reduce_sum -from model.stylegan.op import upfirdn2d - - -class AdaptiveAugment: - def __init__(self, ada_aug_target, ada_aug_len, update_every, device): - self.ada_aug_target = ada_aug_target - self.ada_aug_len = ada_aug_len - self.update_every = update_every - - self.ada_update = 0 - self.ada_aug_buf = torch.tensor([0.0, 0.0], device=device) - self.r_t_stat = 0 - self.ada_aug_p = 0 - - @torch.no_grad() - def tune(self, real_pred): - self.ada_aug_buf += torch.tensor( - (torch.sign(real_pred).sum().item(), real_pred.shape[0]), - device=real_pred.device, - ) - self.ada_update += 1 - - if self.ada_update % self.update_every == 0: - self.ada_aug_buf = reduce_sum(self.ada_aug_buf) - pred_signs, n_pred = self.ada_aug_buf.tolist() - - self.r_t_stat = pred_signs / n_pred - - if self.r_t_stat > self.ada_aug_target: - sign = 1 - - else: - sign = -1 - - self.ada_aug_p += sign * n_pred / self.ada_aug_len - self.ada_aug_p = min(1, max(0, self.ada_aug_p)) - self.ada_aug_buf.mul_(0) - self.ada_update = 0 - - return self.ada_aug_p - - -SYM6 = ( - 0.015404109327027373, - 0.0034907120842174702, - -0.11799011114819057, - -0.048311742585633, - 0.4910559419267466, - 0.787641141030194, - 0.3379294217276218, - -0.07263752278646252, - -0.021060292512300564, - 0.04472490177066578, - 0.0017677118642428036, - -0.007800708325034148, -) - - -def translate_mat(t_x, t_y, device="cpu"): - batch = t_x.shape[0] - - mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) - translate = torch.stack((t_x, t_y), 1) - mat[:, :2, 2] = translate - - return mat - - -def rotate_mat(theta, device="cpu"): - batch = theta.shape[0] - - mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) - sin_t = torch.sin(theta) - cos_t = torch.cos(theta) - rot = torch.stack((cos_t, -sin_t, sin_t, cos_t), 1).view(batch, 2, 2) - mat[:, :2, :2] = rot - - return mat - - -def scale_mat(s_x, s_y, device="cpu"): - batch = s_x.shape[0] - - mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1) - mat[:, 0, 0] = s_x - mat[:, 1, 1] = s_y - - return mat - - -def translate3d_mat(t_x, t_y, t_z): - batch = t_x.shape[0] - - mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) - translate = torch.stack((t_x, t_y, t_z), 1) - mat[:, :3, 3] = translate - - return mat - - -def rotate3d_mat(axis, theta): - batch = theta.shape[0] - - u_x, u_y, u_z = axis - - eye = torch.eye(3).unsqueeze(0) - cross = torch.tensor([(0, -u_z, u_y), (u_z, 0, -u_x), (-u_y, u_x, 0)]).unsqueeze(0) - outer = torch.tensor(axis) - outer = (outer.unsqueeze(1) * outer).unsqueeze(0) - - sin_t = torch.sin(theta).view(-1, 1, 1) - cos_t = torch.cos(theta).view(-1, 1, 1) - - rot = cos_t * eye + sin_t * cross + (1 - cos_t) * outer - - eye_4 = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) - eye_4[:, :3, 
:3] = rot - - return eye_4 - - -def scale3d_mat(s_x, s_y, s_z): - batch = s_x.shape[0] - - mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) - mat[:, 0, 0] = s_x - mat[:, 1, 1] = s_y - mat[:, 2, 2] = s_z - - return mat - - -def luma_flip_mat(axis, i): - batch = i.shape[0] - - eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) - axis = torch.tensor(axis + (0,)) - flip = 2 * torch.ger(axis, axis) * i.view(-1, 1, 1) - - return eye - flip - - -def saturation_mat(axis, i): - batch = i.shape[0] - - eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1) - axis = torch.tensor(axis + (0,)) - axis = torch.ger(axis, axis) - saturate = axis + (eye - axis) * i.view(-1, 1, 1) - - return saturate - - -def lognormal_sample(size, mean=0, std=1, device="cpu"): - return torch.empty(size, device=device).log_normal_(mean=mean, std=std) - - -def category_sample(size, categories, device="cpu"): - category = torch.tensor(categories, device=device) - sample = torch.randint(high=len(categories), size=(size,), device=device) - - return category[sample] - - -def uniform_sample(size, low, high, device="cpu"): - return torch.empty(size, device=device).uniform_(low, high) - - -def normal_sample(size, mean=0, std=1, device="cpu"): - return torch.empty(size, device=device).normal_(mean, std) - - -def bernoulli_sample(size, p, device="cpu"): - return torch.empty(size, device=device).bernoulli_(p) - - -def random_mat_apply(p, transform, prev, eye, device="cpu"): - size = transform.shape[0] - select = bernoulli_sample(size, p, device=device).view(size, 1, 1) - select_transform = select * transform + (1 - select) * eye - - return select_transform @ prev - - -def sample_affine(p, size, height, width, device="cpu"): - G = torch.eye(3, device=device).unsqueeze(0).repeat(size, 1, 1) - eye = G - - # flip - param = category_sample(size, (0, 1)) - Gc = scale_mat(1 - 2.0 * param, torch.ones(size), device=device) - G = random_mat_apply(p, Gc, G, eye, device=device) - # print('flip', G, scale_mat(1 - 2.0 * param, torch.ones(size)), sep='\n') - - # 90 rotate - #param = category_sample(size, (0, 3)) - #Gc = rotate_mat(-math.pi / 2 * param, device=device) - #G = random_mat_apply(p, Gc, G, eye, device=device) - # print('90 rotate', G, rotate_mat(-math.pi / 2 * param), sep='\n') - - # integer translate - param = uniform_sample(size, -0.125, 0.125) - param_height = torch.round(param * height) / height - param_width = torch.round(param * width) / width - Gc = translate_mat(param_width, param_height, device=device) - G = random_mat_apply(p, Gc, G, eye, device=device) - # print('integer translate', G, translate_mat(param_width, param_height), sep='\n') - - # isotropic scale - param = lognormal_sample(size, std=0.1 * math.log(2)) - Gc = scale_mat(param, param, device=device) - G = random_mat_apply(p, Gc, G, eye, device=device) - # print('isotropic scale', G, scale_mat(param, param), sep='\n') - - p_rot = 1 - math.sqrt(1 - p) - - # pre-rotate - param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25) - Gc = rotate_mat(-param, device=device) - G = random_mat_apply(p_rot, Gc, G, eye, device=device) - # print('pre-rotate', G, rotate_mat(-param), sep='\n') - - # anisotropic scale - param = lognormal_sample(size, std=0.1 * math.log(2)) - Gc = scale_mat(param, 1 / param, device=device) - G = random_mat_apply(p, Gc, G, eye, device=device) - # print('anisotropic scale', G, scale_mat(param, 1 / param), sep='\n') - - # post-rotate - param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25) - Gc = rotate_mat(-param, device=device) - G = 
random_mat_apply(p_rot, Gc, G, eye, device=device) - # print('post-rotate', G, rotate_mat(-param), sep='\n') - - # fractional translate - param = normal_sample(size, std=0.125) - Gc = translate_mat(param, param, device=device) - G = random_mat_apply(p, Gc, G, eye, device=device) - # print('fractional translate', G, translate_mat(param, param), sep='\n') - - return G - - -def sample_color(p, size): - C = torch.eye(4).unsqueeze(0).repeat(size, 1, 1) - eye = C - axis_val = 1 / math.sqrt(3) - axis = (axis_val, axis_val, axis_val) - - # brightness - param = normal_sample(size, std=0.2) - Cc = translate3d_mat(param, param, param) - C = random_mat_apply(p, Cc, C, eye) - - # contrast - param = lognormal_sample(size, std=0.5 * math.log(2)) - Cc = scale3d_mat(param, param, param) - C = random_mat_apply(p, Cc, C, eye) - - # luma flip - param = category_sample(size, (0, 1)) - Cc = luma_flip_mat(axis, param) - C = random_mat_apply(p, Cc, C, eye) - - # hue rotation - param = uniform_sample(size, -math.pi, math.pi) - Cc = rotate3d_mat(axis, param) - C = random_mat_apply(p, Cc, C, eye) - - # saturation - param = lognormal_sample(size, std=1 * math.log(2)) - Cc = saturation_mat(axis, param) - C = random_mat_apply(p, Cc, C, eye) - - return C - - -def make_grid(shape, x0, x1, y0, y1, device): - n, c, h, w = shape - grid = torch.empty(n, h, w, 3, device=device) - grid[:, :, :, 0] = torch.linspace(x0, x1, w, device=device) - grid[:, :, :, 1] = torch.linspace(y0, y1, h, device=device).unsqueeze(-1) - grid[:, :, :, 2] = 1 - - return grid - - -def affine_grid(grid, mat): - n, h, w, _ = grid.shape - return (grid.view(n, h * w, 3) @ mat.transpose(1, 2)).view(n, h, w, 2) - - -def get_padding(G, height, width, kernel_size): - device = G.device - - cx = (width - 1) / 2 - cy = (height - 1) / 2 - cp = torch.tensor( - [(-cx, -cy, 1), (cx, -cy, 1), (cx, cy, 1), (-cx, cy, 1)], device=device - ) - cp = G @ cp.T - - pad_k = kernel_size // 4 - - pad = cp[:, :2, :].permute(1, 0, 2).flatten(1) - pad = torch.cat((-pad, pad)).max(1).values - pad = pad + torch.tensor([pad_k * 2 - cx, pad_k * 2 - cy] * 2, device=device) - pad = pad.max(torch.tensor([0, 0] * 2, device=device)) - pad = pad.min(torch.tensor([width - 1, height - 1] * 2, device=device)) - - pad_x1, pad_y1, pad_x2, pad_y2 = pad.ceil().to(torch.int32) - - return pad_x1, pad_x2, pad_y1, pad_y2 - - -def try_sample_affine_and_pad(img, p, kernel_size, G=None): - batch, _, height, width = img.shape - - G_try = G - - if G is None: - G_try = torch.inverse(sample_affine(p, batch, height, width)) - - pad_x1, pad_x2, pad_y1, pad_y2 = get_padding(G_try, height, width, kernel_size) - - img_pad = F.pad(img, (pad_x1, pad_x2, pad_y1, pad_y2), mode="reflect") - - return img_pad, G_try, (pad_x1, pad_x2, pad_y1, pad_y2) - - -class GridSampleForward(autograd.Function): - @staticmethod - def forward(ctx, input, grid): - out = F.grid_sample( - input, grid, mode="bilinear", padding_mode="zeros", align_corners=False - ) - ctx.save_for_backward(input, grid) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = GridSampleBackward.apply(grad_output, input, grid) - - return grad_input, grad_grid - - -class GridSampleBackward(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation("aten::grid_sampler_2d_backward") - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - - return grad_input, grad_grid - - @staticmethod - def 
backward(ctx, grad_grad_input, grad_grad_grid): - grid, = ctx.saved_tensors - grad_grad_output = None - - if ctx.needs_input_grad[0]: - grad_grad_output = GridSampleForward.apply(grad_grad_input, grid) - - return grad_grad_output, None, None - - -grid_sample = GridSampleForward.apply - - -def scale_mat_single(s_x, s_y): - return torch.tensor(((s_x, 0, 0), (0, s_y, 0), (0, 0, 1)), dtype=torch.float32) - - -def translate_mat_single(t_x, t_y): - return torch.tensor(((1, 0, t_x), (0, 1, t_y), (0, 0, 1)), dtype=torch.float32) - - -def random_apply_affine(img, p, G=None, antialiasing_kernel=SYM6): - kernel = antialiasing_kernel - len_k = len(kernel) - - kernel = torch.as_tensor(kernel).to(img) - # kernel = torch.ger(kernel, kernel).to(img) - kernel_flip = torch.flip(kernel, (0,)) - - img_pad, G, (pad_x1, pad_x2, pad_y1, pad_y2) = try_sample_affine_and_pad( - img, p, len_k, G - ) - - G_inv = ( - translate_mat_single((pad_x1 - pad_x2).item() / 2, (pad_y1 - pad_y2).item() / 2) - @ G - ) - up_pad = ( - (len_k + 2 - 1) // 2, - (len_k - 2) // 2, - (len_k + 2 - 1) // 2, - (len_k - 2) // 2, - ) - img_2x = upfirdn2d(img_pad, kernel.unsqueeze(0), up=(2, 1), pad=(*up_pad[:2], 0, 0)) - img_2x = upfirdn2d(img_2x, kernel.unsqueeze(1), up=(1, 2), pad=(0, 0, *up_pad[2:])) - G_inv = scale_mat_single(2, 2) @ G_inv @ scale_mat_single(1 / 2, 1 / 2) - G_inv = translate_mat_single(-0.5, -0.5) @ G_inv @ translate_mat_single(0.5, 0.5) - batch_size, channel, height, width = img.shape - pad_k = len_k // 4 - shape = (batch_size, channel, (height + pad_k * 2) * 2, (width + pad_k * 2) * 2) - G_inv = ( - scale_mat_single(2 / img_2x.shape[3], 2 / img_2x.shape[2]) - @ G_inv - @ scale_mat_single(1 / (2 / shape[3]), 1 / (2 / shape[2])) - ) - grid = F.affine_grid(G_inv[:, :2, :].to(img_2x), shape, align_corners=False) - img_affine = grid_sample(img_2x, grid) - d_p = -pad_k * 2 - down_pad = ( - d_p + (len_k - 2 + 1) // 2, - d_p + (len_k - 2) // 2, - d_p + (len_k - 2 + 1) // 2, - d_p + (len_k - 2) // 2, - ) - img_down = upfirdn2d( - img_affine, kernel_flip.unsqueeze(0), down=(2, 1), pad=(*down_pad[:2], 0, 0) - ) - img_down = upfirdn2d( - img_down, kernel_flip.unsqueeze(1), down=(1, 2), pad=(0, 0, *down_pad[2:]) - ) - - return img_down, G - - -def apply_color(img, mat): - batch = img.shape[0] - img = img.permute(0, 2, 3, 1) - mat_mul = mat[:, :3, :3].transpose(1, 2).view(batch, 1, 3, 3) - mat_add = mat[:, :3, 3].view(batch, 1, 1, 3) - img = img @ mat_mul + mat_add - img = img.permute(0, 3, 1, 2) - - return img - - -def random_apply_color(img, p, C=None): - if C is None: - C = sample_color(p, img.shape[0]) - - img = apply_color(img, C.to(img)) - - return img, C - - -def augment(img, p, transform_matrix=(None, None)): - img, G = random_apply_affine(img, p, transform_matrix[0]) - img, C = random_apply_color(img, p, transform_matrix[1]) - - return img, (G, C) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py deleted file mode 100644 index 81f2189dcfcb789861c5054dac0838fca01a28bf..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/PIL/_deprecate.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import annotations - -import warnings - -from . 
import __version__ - - -def deprecate( - deprecated: str, - when: int | None, - replacement: str | None = None, - *, - action: str | None = None, - plural: bool = False, -) -> None: - """ - Deprecations helper. - - :param deprecated: Name of thing to be deprecated. - :param when: Pillow major version to be removed in. - :param replacement: Name of replacement. - :param action: Instead of "replacement", give a custom call to action - e.g. "Upgrade to new thing". - :param plural: if the deprecated thing is plural, needing "are" instead of "is". - - Usually of the form: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). - Use [replacement] instead." - - You can leave out the replacement sentence: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd)" - - Or with another call to action: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). - [action]." - """ - - is_ = "are" if plural else "is" - - if when is None: - removed = "a future version" - elif when <= int(__version__.split(".")[0]): - msg = f"{deprecated} {is_} deprecated and should be removed." - raise RuntimeError(msg) - elif when == 10: - removed = "Pillow 10 (2023-07-01)" - elif when == 11: - removed = "Pillow 11 (2024-10-15)" - else: - msg = f"Unknown removal version: {when}. Update {__name__}?" - raise ValueError(msg) - - if replacement and action: - msg = "Use only one of 'replacement' and 'action'" - raise ValueError(msg) - - if replacement: - action = f". Use {replacement} instead." - elif action: - action = f". {action.rstrip('.')}." - else: - action = "" - - warnings.warn( - f"{deprecated} {is_} deprecated and will be removed in {removed}{action}", - DeprecationWarning, - stacklevel=3, - ) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py deleted file mode 100644 index ae9864851baee17613175361a9983f6756a2b0d1..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/anyio/_core/_eventloop.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import annotations - -import math -import sys -import threading -from contextlib import contextmanager -from importlib import import_module -from typing import ( - Any, - Awaitable, - Callable, - Generator, - TypeVar, -) - -import sniffio - -# This must be updated when new backends are introduced -from ._compat import DeprecatedAwaitableFloat - -BACKENDS = "asyncio", "trio" - -T_Retval = TypeVar("T_Retval") -threadlocals = threading.local() - - -def run( - func: Callable[..., Awaitable[T_Retval]], - *args: object, - backend: str = "asyncio", - backend_options: dict[str, Any] | None = None, -) -> T_Retval: - """ - Run the given coroutine function in an asynchronous event loop. - - The current thread must not be already running an event loop. 
- - :param func: a coroutine function - :param args: positional arguments to ``func`` - :param backend: name of the asynchronous event loop implementation – currently either - ``asyncio`` or ``trio`` - :param backend_options: keyword arguments to call the backend ``run()`` implementation with - (documented :ref:`here `) - :return: the return value of the coroutine function - :raises RuntimeError: if an asynchronous event loop is already running in this thread - :raises LookupError: if the named backend is not found - - """ - try: - asynclib_name = sniffio.current_async_library() - except sniffio.AsyncLibraryNotFoundError: - pass - else: - raise RuntimeError(f"Already running {asynclib_name} in this thread") - - try: - asynclib = import_module(f"..._backends._{backend}", package=__name__) - except ImportError as exc: - raise LookupError(f"No such backend: {backend}") from exc - - token = None - if sniffio.current_async_library_cvar.get(None) is None: - # Since we're in control of the event loop, we can cache the name of the async library - token = sniffio.current_async_library_cvar.set(backend) - - try: - backend_options = backend_options or {} - return asynclib.run(func, *args, **backend_options) - finally: - if token: - sniffio.current_async_library_cvar.reset(token) - - -async def sleep(delay: float) -> None: - """ - Pause the current task for the specified duration. - - :param delay: the duration, in seconds - - """ - return await get_asynclib().sleep(delay) - - -async def sleep_forever() -> None: - """ - Pause the current task until it's cancelled. - - This is a shortcut for ``sleep(math.inf)``. - - .. versionadded:: 3.1 - - """ - await sleep(math.inf) - - -async def sleep_until(deadline: float) -> None: - """ - Pause the current task until the given time. - - :param deadline: the absolute time to wake up at (according to the internal monotonic clock of - the event loop) - - .. versionadded:: 3.1 - - """ - now = current_time() - await sleep(max(deadline - now, 0)) - - -def current_time() -> DeprecatedAwaitableFloat: - """ - Return the current value of the event loop's internal clock. 
- - :return: the clock value (seconds) - - """ - return DeprecatedAwaitableFloat(get_asynclib().current_time(), current_time) - - -def get_all_backends() -> tuple[str, ...]: - """Return a tuple of the names of all built-in backends.""" - return BACKENDS - - -def get_cancelled_exc_class() -> type[BaseException]: - """Return the current async library's cancellation exception class.""" - return get_asynclib().CancelledError - - -# -# Private API -# - - -@contextmanager -def claim_worker_thread(backend: str) -> Generator[Any, None, None]: - module = sys.modules["anyio._backends._" + backend] - threadlocals.current_async_module = module - try: - yield - finally: - del threadlocals.current_async_module - - -def get_asynclib(asynclib_name: str | None = None) -> Any: - if asynclib_name is None: - asynclib_name = sniffio.current_async_library() - - modulename = "anyio._backends._" + asynclib_name - try: - return sys.modules[modulename] - except KeyError: - return import_module(modulename) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/_src/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/_src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py deleted file mode 100644 index 0205359857df34981746b8c35025a7fad2152123..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/main_download_pretrained_models.py +++ /dev/null @@ -1,141 +0,0 @@ -import argparse -import os -import requests -import re - - -""" -How to use: -download all the models: - python main_download_pretrained_models.py --models "all" --model_dir "model_zoo" - -download DnCNN models: - python main_download_pretrained_models.py --models "DnCNN" --model_dir "model_zoo" - -download SRMD models: - python main_download_pretrained_models.py --models "SRMD" --model_dir "model_zoo" - -download BSRGAN models: - python main_download_pretrained_models.py --models "BSRGAN" --model_dir "model_zoo" - -download FFDNet models: - python main_download_pretrained_models.py --models "FFDNet" --model_dir "model_zoo" - -download DPSR models: - python main_download_pretrained_models.py --models "DPSR" --model_dir "model_zoo" - -download SwinIR models: - python main_download_pretrained_models.py --models "SwinIR" --model_dir "model_zoo" - -download VRT models: - python main_download_pretrained_models.py --models "VRT" --model_dir "model_zoo" - -download other models: - python main_download_pretrained_models.py --models "others" --model_dir "model_zoo" - ------------------------------------------------------------------- - -download 'dncnn_15.pth' and 'dncnn_50.pth' - python main_download_pretrained_models.py --models "dncnn_15.pth dncnn_50.pth" --model_dir "model_zoo" - ------------------------------------------------------------------- - -download DnCNN models and 'BSRGAN.pth' - python main_download_pretrained_models.py --models "DnCNN BSRGAN.pth" --model_dir "model_zoo" - -""" - - -def download_pretrained_model(model_dir='model_zoo', model_name='dncnn3.pth'): - if os.path.exists(os.path.join(model_dir, model_name)): - print(f'already exists, skip downloading [{model_name}]') - else: - os.makedirs(model_dir, exist_ok=True) - if 'SwinIR' in model_name: - url = 
'https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}'.format(model_name) - elif 'VRT' in model_name: - url = 'https://github.com/JingyunLiang/VRT/releases/download/v0.0/{}'.format(model_name) - else: - url = 'https://github.com/cszn/KAIR/releases/download/v1.0/{}'.format(model_name) - r = requests.get(url, allow_redirects=True) - print(f'downloading [{model_dir}/{model_name}] ...') - open(os.path.join(model_dir, model_name), 'wb').write(r.content) - print('done!') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--models', - type=lambda s: re.split(' |, ', s), - default = "dncnn3.pth", - help='comma or space delimited list of characters, e.g., "DnCNN", "DnCNN BSRGAN.pth", "dncnn_15.pth dncnn_50.pth"') - parser.add_argument('--model_dir', type=str, default='model_zoo', help='path of model_zoo') - args = parser.parse_args() - - print(f'trying to download {args.models}') - - method_model_zoo = {'DnCNN': ['dncnn_15.pth', 'dncnn_25.pth', 'dncnn_50.pth', 'dncnn3.pth', 'dncnn_color_blind.pth', 'dncnn_gray_blind.pth'], - 'SRMD': ['srmdnf_x2.pth', 'srmdnf_x3.pth', 'srmdnf_x4.pth', 'srmd_x2.pth', 'srmd_x3.pth', 'srmd_x4.pth'], - 'DPSR': ['dpsr_x2.pth', 'dpsr_x3.pth', 'dpsr_x4.pth', 'dpsr_x4_gan.pth'], - 'FFDNet': ['ffdnet_color.pth', 'ffdnet_gray.pth', 'ffdnet_color_clip.pth', 'ffdnet_gray_clip.pth'], - 'USRNet': ['usrgan.pth', 'usrgan_tiny.pth', 'usrnet.pth', 'usrnet_tiny.pth'], - 'DPIR': ['drunet_gray.pth', 'drunet_color.pth', 'drunet_deblocking_color.pth', 'drunet_deblocking_grayscale.pth'], - 'BSRGAN': ['BSRGAN.pth', 'BSRNet.pth', 'BSRGANx2.pth'], - 'IRCNN': ['ircnn_color.pth', 'ircnn_gray.pth'], - 'SwinIR': ['001_classicalSR_DF2K_s64w8_SwinIR-M_x2.pth', '001_classicalSR_DF2K_s64w8_SwinIR-M_x3.pth', - '001_classicalSR_DF2K_s64w8_SwinIR-M_x4.pth', '001_classicalSR_DF2K_s64w8_SwinIR-M_x8.pth', - '001_classicalSR_DIV2K_s48w8_SwinIR-M_x2.pth', '001_classicalSR_DIV2K_s48w8_SwinIR-M_x3.pth', - '001_classicalSR_DIV2K_s48w8_SwinIR-M_x4.pth', '001_classicalSR_DIV2K_s48w8_SwinIR-M_x8.pth', - '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x2.pth', '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x3.pth', - '002_lightweightSR_DIV2K_s64w8_SwinIR-S_x4.pth', '003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_GAN.pth', - '003_realSR_BSRGAN_DFO_s64w8_SwinIR-M_x4_PSNR.pth', '004_grayDN_DFWB_s128w8_SwinIR-M_noise15.pth', - '004_grayDN_DFWB_s128w8_SwinIR-M_noise25.pth', '004_grayDN_DFWB_s128w8_SwinIR-M_noise50.pth', - '005_colorDN_DFWB_s128w8_SwinIR-M_noise15.pth', '005_colorDN_DFWB_s128w8_SwinIR-M_noise25.pth', - '005_colorDN_DFWB_s128w8_SwinIR-M_noise50.pth', '006_CAR_DFWB_s126w7_SwinIR-M_jpeg10.pth', - '006_CAR_DFWB_s126w7_SwinIR-M_jpeg20.pth', '006_CAR_DFWB_s126w7_SwinIR-M_jpeg30.pth', - '006_CAR_DFWB_s126w7_SwinIR-M_jpeg40.pth'], - 'VRT': ['001_VRT_videosr_bi_REDS_6frames.pth', '002_VRT_videosr_bi_REDS_16frames.pth', - '003_VRT_videosr_bi_Vimeo_7frames.pth', '004_VRT_videosr_bd_Vimeo_7frames.pth', - '005_VRT_videodeblurring_DVD.pth', '006_VRT_videodeblurring_GoPro.pth', - '007_VRT_videodeblurring_REDS.pth', '008_VRT_videodenoising_DAVIS.pth'], - 'others': ['msrresnet_x4_psnr.pth', 'msrresnet_x4_gan.pth', 'imdn_x4.pth', 'RRDB.pth', 'ESRGAN.pth', - 'FSSR_DPED.pth', 'FSSR_JPEG.pth', 'RealSR_DPED.pth', 'RealSR_JPEG.pth'] - } - - method_zoo = list(method_model_zoo.keys()) - model_zoo = [] - for b in list(method_model_zoo.values()): - model_zoo += b - - if 'all' in args.models: - for method in method_zoo: - for model_name in method_model_zoo[method]: - 
download_pretrained_model(args.model_dir, model_name) - else: - for method_model in args.models: - if method_model in method_zoo: # method, need for loop - for model_name in method_model_zoo[method_model]: - if 'SwinIR' in model_name: - download_pretrained_model(os.path.join(args.model_dir, 'swinir'), model_name) - elif 'VRT' in model_name: - download_pretrained_model(os.path.join(args.model_dir, 'vrt'), model_name) - else: - download_pretrained_model(args.model_dir, model_name) - elif method_model in model_zoo: # model, do not need for loop - if 'SwinIR' in method_model: - download_pretrained_model(os.path.join(args.model_dir, 'swinir'), method_model) - elif 'VRT' in method_model: - download_pretrained_model(os.path.join(args.model_dir, 'vrt'), method_model) - else: - download_pretrained_model(args.model_dir, method_model) - else: - print(f'Do not find {method_model} from the pre-trained model zoo!') - - - - - - - - - diff --git a/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py b/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py deleted file mode 100644 index 0f7a44aa79fae6c8697f0426248c55c3189ec0f3..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/api-examples/api-example-chat.py +++ /dev/null @@ -1,94 +0,0 @@ -import html -import json - -import requests - -# For local streaming, the websockets are hosted without ssl - http:// -HOST = 'localhost:5000' -URI = f'http://{HOST}/api/v1/chat' - -# For reverse-proxied streaming, the remote will likely host with ssl - https:// -# URI = 'https://your-uri-here.trycloudflare.com/api/v1/chat' - - -def run(user_input, history): - request = { - 'user_input': user_input, - 'max_new_tokens': 250, - 'auto_max_new_tokens': False, - 'max_tokens_second': 0, - 'history': history, - 'mode': 'instruct', # Valid options: 'chat', 'chat-instruct', 'instruct' - 'character': 'Example', - 'instruction_template': 'Vicuna-v1.1', # Will get autodetected if unset - 'your_name': 'You', - # 'name1': 'name of user', # Optional - # 'name2': 'name of character', # Optional - # 'context': 'character context', # Optional - # 'greeting': 'greeting', # Optional - # 'name1_instruct': 'You', # Optional - # 'name2_instruct': 'Assistant', # Optional - # 'context_instruct': 'context_instruct', # Optional - # 'turn_template': 'turn_template', # Optional - 'regenerate': False, - '_continue': False, - 'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>', - - # Generation params. If 'preset' is set to different than 'None', the values - # in presets/preset-name.yaml are used instead of the individual numbers. 
- 'preset': 'None', - 'do_sample': True, - 'temperature': 0.7, - 'top_p': 0.1, - 'typical_p': 1, - 'epsilon_cutoff': 0, # In units of 1e-4 - 'eta_cutoff': 0, # In units of 1e-4 - 'tfs': 1, - 'top_a': 0, - 'repetition_penalty': 1.18, - 'presence_penalty': 0, - 'frequency_penalty': 0, - 'repetition_penalty_range': 0, - 'top_k': 40, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': False, - 'mirostat_mode': 0, - 'mirostat_tau': 5, - 'mirostat_eta': 0.1, - 'grammar_string': '', - 'guidance_scale': 1, - 'negative_prompt': '', - - 'seed': -1, - 'add_bos_token': True, - 'truncation_length': 2048, - 'ban_eos_token': False, - 'custom_token_bans': '', - 'skip_special_tokens': True, - 'stopping_strings': [] - } - - response = requests.post(URI, json=request) - - if response.status_code == 200: - result = response.json()['results'][0]['history'] - print(json.dumps(result, indent=4)) - print() - print(html.unescape(result['visible'][-1][1])) - - -if __name__ == '__main__': - user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard." - - # Basic example - history = {'internal': [], 'visible': []} - - # "Continue" example. Make sure to set '_continue' to True above - # arr = [user_input, 'Surely, here is'] - # history = {'internal': [arr], 'visible': [arr]} - - run(user_input, history) diff --git a/spaces/leurez/moss/src/api/index.ts b/spaces/leurez/moss/src/api/index.ts deleted file mode 100644 index 0ca33fd30456f53e2c784a3e09187c61137b65d2..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/src/api/index.ts +++ /dev/null @@ -1,66 +0,0 @@ -import type { AxiosProgressEvent, GenericAbortSignal } from 'axios' -import { post } from '@/utils/request' -import { useAuthStore, useSettingStore } from '@/store' - -export function fetchChatAPI( - prompt: string, - options?: { conversationId?: string; parentMessageId?: string }, - signal?: GenericAbortSignal, -) { - return post({ - url: '/chat', - data: { prompt, options }, - signal, - }) -} - -export function fetchChatConfig() { - return post({ - url: '/config', - }) -} - -export function fetchChatAPIProcess( - params: { - prompt: string - options?: { conversationId?: string; parentMessageId?: string } - signal?: GenericAbortSignal - onDownloadProgress?: (progressEvent: AxiosProgressEvent) => void }, -) { - const settingStore = useSettingStore() - const authStore = useAuthStore() - - let data: Record = { - prompt: params.prompt, - options: params.options, - } - - if (authStore.isChatGPTAPI) { - data = { - ...data, - systemMessage: settingStore.systemMessage, - temperature: settingStore.temperature, - top_p: settingStore.top_p, - } - } - - return post({ - url: '/chat-process', - data, - signal: params.signal, - onDownloadProgress: params.onDownloadProgress, - }) -} - -export function fetchSession() { - return post({ - url: '/session', - }) -} - -export function fetchVerify(token: string) { - return post({ - url: '/verify', - data: { token }, - }) -} diff --git a/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx b/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx deleted file mode 100644 index e25af722c7a5dc1121a9ab58d6716952f9f76081..0000000000000000000000000000000000000000 --- a/spaces/lightli/bingo-newbing/src/components/ui/textarea.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface TextareaProps - extends React.TextareaHTMLAttributes {} - -const 
Textarea = React.forwardRef( - ({ className, ...props }, ref) => { - return ( - -

      - Input text to perform semantic similarity search... -

      - - - - - diff --git a/spaces/radames/Candle-T5-Generation-Wasm/build/m-quantized.d.ts b/spaces/radames/Candle-T5-Generation-Wasm/build/m-quantized.d.ts deleted file mode 100644 index 21c078ca6d35d5432ad19ec8e4306c62e4072d9b..0000000000000000000000000000000000000000 --- a/spaces/radames/Candle-T5-Generation-Wasm/build/m-quantized.d.ts +++ /dev/null @@ -1,74 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -/** -*/ -export class ModelConditionalGeneration { - free(): void; -/** -* @param {Uint8Array} weights -* @param {Uint8Array} tokenizer -* @param {Uint8Array} config -*/ - constructor(weights: Uint8Array, tokenizer: Uint8Array, config: Uint8Array); -/** -* @param {any} input -* @returns {any} -*/ - decode(input: any): any; -} -/** -*/ -export class ModelEncoder { - free(): void; -/** -* @param {Uint8Array} weights -* @param {Uint8Array} tokenizer -* @param {Uint8Array} config -*/ - constructor(weights: Uint8Array, tokenizer: Uint8Array, config: Uint8Array); -/** -* @param {any} input -* @returns {any} -*/ - decode(input: any): any; -} - -export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module; - -export interface InitOutput { - readonly memory: WebAssembly.Memory; - readonly __wbg_modelencoder_free: (a: number) => void; - readonly __wbg_modelconditionalgeneration_free: (a: number) => void; - readonly modelconditionalgeneration_load: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void; - readonly modelconditionalgeneration_decode: (a: number, b: number, c: number) => void; - readonly modelencoder_load: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void; - readonly modelencoder_decode: (a: number, b: number, c: number) => void; - readonly main: (a: number, b: number) => number; - readonly __wbindgen_malloc: (a: number, b: number) => number; - readonly __wbindgen_realloc: (a: number, b: number, c: number, d: number) => number; - readonly __wbindgen_add_to_stack_pointer: (a: number) => number; - readonly __wbindgen_free: (a: number, b: number, c: number) => void; - readonly __wbindgen_exn_store: (a: number) => void; - readonly __wbindgen_start: () => void; -} - -export type SyncInitInput = BufferSource | WebAssembly.Module; -/** -* Instantiates the given `module`, which can either be bytes or -* a precompiled `WebAssembly.Module`. -* -* @param {SyncInitInput} module -* -* @returns {InitOutput} -*/ -export function initSync(module: SyncInitInput): InitOutput; - -/** -* If `module_or_path` is {RequestInfo} or {URL}, makes a request and -* for everything else, calls `WebAssembly.instantiate` directly. -* -* @param {InitInput | Promise} module_or_path -* -* @returns {Promise} -*/ -export default function __wbg_init (module_or_path?: InitInput | Promise): Promise; diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Aster V7 6x64 Key Benefits and Features of Using Aster V7 on Windows.md b/spaces/raedeXanto/academic-chatgpt-beta/Aster V7 6x64 Key Benefits and Features of Using Aster V7 on Windows.md deleted file mode 100644 index 3b306a269492231f0d1a200d1f742302a323bab6..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Aster V7 6x64 Key Benefits and Features of Using Aster V7 on Windows.md +++ /dev/null @@ -1,108 +0,0 @@ - -

      Aster V7 6x64 Key: What Is It and How to Use It?

      -

Have you ever wanted to use one computer with multiple monitors, keyboards and mice? Have you ever wished to share your PC with your family, friends or colleagues without compromising your privacy or performance? Have you ever dreamed of playing games or watching movies on a big screen with surround sound? If you answered yes to any of these questions, then you need an Aster V7 6x64 key.

      -

      Introduction

      -

      In this article, we will explain what Aster V7 is, what Aster V7 6x64 key is, why you need it, how to use it and what benefits it can bring to you. By the end of this article, you will be able to turn your single PC into a multi-user station with ease and convenience.

      -

      aster v7 6x64 key


      DOWNLOADhttps://tinourl.com/2uL2Bg



      -

      What is Aster V7?

      -

Aster V7 is software that allows you to create multiple workstations from one PC. It enables you to connect multiple monitors, keyboards and mice to your PC and assign them to different users. Each user can work independently on their own desktop, run their own applications and access their own files. Aster V7 supports Windows 7/8/10 (32-bit and 64-bit) operating systems.

      -

      What is Aster V7 6x64 key?

      -

      Aster V7 6x64 key is an activation code that you need to use Aster V7 on a 64-bit Windows system. It is a unique combination of letters and numbers that verifies your purchase and unlocks all the features of Aster V7. You can buy Aster V7 6x64 key from the official website of ASTER or from other authorized sellers.

      -

      Why do you need Aster V7 6x64 key?

      -

You need an Aster V7 6x64 key if you want to use Aster V7 on a 64-bit Windows system. Without the key, you can only use Aster V7 in trial mode for 30 days. In trial mode, you can only create two workstations and some functions are limited. With the key, you can create up to six workstations (depending on your hardware configuration) and enjoy all the functions of Aster V7.

      -

      How to use Aster V7 6x64 key?

      -

      Using Aster V7 6x64 key is very easy. You just need to follow these simple steps:

      -


      -

      Download and install Aster V7

      -

      First, you need to download the latest version of Aster V7 from the official website of ASTER or from other authorized sources. Then, you need to install it on your PC by following the instructions on the screen. You may need to restart your PC after the installation.

      -

      Activate Aster V7 with the key

      -

      Second, you need to activate Aster V7 with the key that you have purchased. To do this, you need to open the ASTER Control Panel and click on the "Enter Key" button. Then, you need to enter your name, email address and the key in the corresponding fields. After that, click on the "Activate" button and wait for a few seconds until the activation is completed.

      -

      Configure Aster V7 settings

      -

      Third, you need to configure Aster V7 settings according to your preferences and needs. You can do this by using the ASTER Control Panel or by right-clicking on the ASTER icon in the system tray. You can adjust various parameters such as display resolution, color depth, sound volume, keyboard layout, mouse speed and more. You can also assign different wallpapers, screensavers and themes for each workstation.

      -

      Connect multiple monitors, keyboards and mice

      -

      Fourth, you need to connect multiple monitors, keyboards and mice to your PC using HDMI, VGA, DVI or USB ports. You can use any combination of devices that suits your situation. For example, you can use one monitor with two keyboards and two mice for two users; or two monitors with one keyboard and one mouse for one user; or three monitors with three keyboards and three mice for three users; etc.

      -

      Benefits of using Aster V7 6x64 key

      -

      Using Aster V7 6x64 key can bring many benefits to you. Here are some of them:

      -

      Save money and space

      -

With an Aster V7 6x64 key, you don't need to buy multiple PCs for multiple users. You can save money on hardware costs, electricity bills and maintenance fees. You also don't need to make room for multiple PCs, which saves space on your desk or in your room.

      -

      Increase productivity and collaboration

      -

With an Aster V7 6x64 key, you can increase productivity and collaboration among users. Different users can work on different tasks simultaneously without interfering with one another. You can also share files, folders and printers easily among the workstations, and communicate with other users via chat or voice call using built-in or external microphones and speakers.

      -

      Enjoy gaming and entertainment

      -

      With Aster V7 6x64 key, you can enjoy gaming and entertainment with other users. You can play multiplayer games online or offline using different controllers such as keyboards, mice, joysticks or gamepads. You can also watch movies or videos on a big screen with surround sound using different media players such as VLC or Windows Media Player.

      -

      Conclusion

      -

In conclusion, the Aster V7 6x64 key unlocks a great piece of software that allows you to create multiple workstations from one PC. It enables you to connect multiple monitors, keyboards and mice to your PC and assign them to different users. Each user can work independently on their own desktop, run their own applications and access their own files. Using an Aster V7 6x64 key can save you money and space, increase productivity and collaboration, and let you enjoy gaming and entertainment.

      -

      Summary of the main points

      -
        -
• Aster V7 is software that allows you to create multiple workstations from one PC.
      • -
      • Aster V7 6x64 key is an activation code that you need to use Aster V7 on a 64-bit Windows system.
      • -
      • You need Aster V7 6x64 key if you want to use Aster V7 without limitations.
      • -
      • You can use Aster V7 6x64 key by downloading and installing Aster V7; activating it with the key; configuring its settings; connecting multiple monitors, keyboards and mice.
      • -
      • You can benefit from using Aster V7 6x64 key by saving money and space; increasing productivity and collaboration; enjoying gaming and entertainment.
      • -
      -

      Call to action

      -

      If you are interested in using Aster V7 6x64 key, don't hesitate any longer. Visit the official website of ASTER today and get your own copy of this amazing software. You will not regret it!

      -

      Frequently Asked Questions

      -
        -
      1. How many workstations can I create with Aster V7 6x64 key?
      2. -

        The number of workstations that you can create with Aster V7 6x64 key depends on your hardware configuration. The maximum number is six workstations for one PC.

        -
      3. Can I use different operating systems for different workstations?
4. - No, you can only use the operating system that is installed on your PC, and all workstations share it. However, you can use different versions of Windows such as Windows 7, 8 or 10.

        -
      5. Can I use Aster V7 6x64 key on multiple PCs?
      6. -

        No, you cannot use Aster V7 6x64 key on multiple PCs. The key is valid for one PC only. If you want to use Aster V7 on another PC, you need to buy another key.

        -
      7. Is Aster V7 6x64 key safe and legal?
      8. -

        Yes, Aster V7 6x64 key is safe and legal. It is a genuine product that is developed and distributed by ASTER, a reputable company that has been in the market for over 10 years. It does not contain any viruses, malware or spyware. It does not violate any laws or regulations.

        -
      9. What if I have any problems or questions about Aster V7 6x64 key?
      10. -

        If you have any problems or questions about Aster V7 6x64 key, you can contact the customer support team of ASTER via email or phone. They will be happy to assist you and solve your issues. You can also visit the FAQ section or the forum of ASTER for more information and tips.

        -
      -

      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md b/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md deleted file mode 100644 index 1b76cb4d70d5fb609d4a357fd894234970f65ff6..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Cash Cash - The Beat Goes On (2012).md +++ /dev/null @@ -1,18 +0,0 @@ - -

      Cash Cash: How The Beat Goes On Became Their Breakthrough Album

      -

      Cash Cash is an electronic music group from New Jersey, USA, that consists of brothers Jean Paul Makhlouf and Alex Makhlouf, and Samuel Frisch. They are known for their catchy dance-pop songs, energetic live shows, and remixes for artists like Katy Perry, Bruno Mars, and Kelly Clarkson.

      -

      But before they became international stars, they had to overcome many challenges and setbacks in their musical journey. One of their most pivotal moments was the release of their third studio album, The Beat Goes On, in 2012.

      -

      Cash Cash - The Beat Goes On (2012)


      Download File: https://tinourl.com/2uL0fh



      -

      The Beat Goes On was a self-produced and self-released album that showcased Cash Cash's versatility and creativity. It featured 11 tracks that ranged from electro-house to dubstep to pop-rock, with influences from Michael Jackson, Daft Punk, and The Beatles.

      -

      The album was only licensed in Japan as a full-length LP, but it was also available worldwide as a 6-track EP on Cash Cash Music. The album spawned two singles: "Michael Jackson (The Beat Goes On)" and "I Like It Loud", which were both released by Dutch label Spinnin' Records.

      -

      "Michael Jackson (The Beat Goes On)" was a tribute to the King of Pop, with samples of his iconic vocals and melodies. The song became a hit in Japan and the Netherlands, reaching the top 10 on the Dance Top 30 chart and the top 30 on the Dutch Top 40 chart.

      -

      "I Like It Loud" was a party anthem that featured Cash Cash's signature vocoder vocals and synth hooks. The song was also used in several commercials and TV shows, such as MTV's Jersey Shore and ABC's Dancing with the Stars.

      -

      The Beat Goes On was a critical and commercial success for Cash Cash, earning them recognition and respect in the electronic music scene. It also opened up new opportunities for them to collaborate with other artists and labels, such as Big Beat Records and Atlantic Records.

      -

      The Beat Goes On was the album that proved that Cash Cash had what it takes to make it big in the music industry. It was the album that made them stand out from the crowd and showed their passion and talent for making music. It was the album that made the beat go on for Cash Cash.

      - -

      The Beat Goes On also received positive reviews from critics and fans, who praised Cash Cash's production skills, songwriting abilities, and musical diversity. Some of the highlights of the album include "Still Got It", a funky electro-pop song with a catchy chorus and guitar riffs; "Mama Told Me", a retro-inspired synth-pop song with a nostalgic vibe and a guest appearance by J.Trill; "History", a melodic and emotional ballad with piano and strings; and "One Last Song", a powerful and uplifting finale with a rock edge.

      -

      -

      The album also showcased Cash Cash's talent for remixing their own songs, as they included extended versions of "Michael Jackson (The Beat Goes On)" and "I Like It Loud" on the EP edition. The remixes added more energy and dynamics to the original tracks, making them suitable for clubs and festivals.

      -

      The Beat Goes On was a milestone for Cash Cash, as it marked their transition from a pop-rock band to an electronic music group. It also demonstrated their ability to adapt to the changing trends and tastes of the music industry, while staying true to their own vision and style. The album was a testament to their hard work, dedication, and passion for making music.

      7b8c122e87
      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md b/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md deleted file mode 100644 index aad755c8ca31b7d1420f87cde31bddd4654f16eb..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version !!TOP!!.md +++ /dev/null @@ -1,156 +0,0 @@ - -

      Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version

      -

      If you are looking for a powerful and easy-to-use anti-malware program that can protect your PC from various threats, you may have heard of Gridinsoft Anti-Malware. But what if you want to enjoy the full features of this software without paying for a license? That's where Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version comes in handy.

      -

      Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version


      Download >> https://tinourl.com/2uL0ai



      -

      Introduction

      -

      In this article, we will explain what Gridinsoft Anti-Malware is, what Crackingpatching is, why you need Gridinsoft Anti-Malware 4.0.5 Patch, and how to install and use it. We will also discuss the pros and cons of using this patch, and give you some tips and warnings before you decide to use it.

      -

      What is Gridinsoft Anti-Malware?

      -

      Gridinsoft Anti-Malware is software that can detect and remove various types of malware from your PC, such as viruses, trojans, spyware, adware, ransomware, rootkits, worms and keyloggers. It can also clean your browser of annoying ads, pop-ups, redirects, and other unwanted elements.

      -

      Gridinsoft Anti-Malware has a powerful scanning and detection engine that can find even the most hidden and stubborn malware on your system. It also has a user-friendly and customizable interface that allows you to adjust the scan modes, settings, and preferences according to your needs.

      -

      Gridinsoft Anti-Malware offers a free trial version that can scan your PC and remove some malware, but it has some limitations in terms of functionality and duration. To unlock the full features of this software, you need to purchase a license that costs $29.95 for one month or $39.95 for one year.

      -

      What is Crackingpatching?

      -

      Crackingpatching is a website that provides patches, cracks, keygens, serial keys, activators, loaders, and other tools that can modify or bypass the license verification or activation process of various software products.

      -

      Crackingpatching claims that its patches are tested and working on different versions of Windows operating systems, and that they are safe and virus-free. However, there is no guarantee that these patches are legitimate or legal.

      -

      How to download Gridinsoft Anti-Malware 4.0.5 with crack
      -Gridinsoft Anti-Malware 4.0.5 license key generator
      -Gridinsoft Anti-Malware 4.0.5 full version free download
      -Gridinsoft Anti-Malware 4.0.5 patch by crackingpatching.com
      -Gridinsoft Anti-Malware 4.0.5 review and features
      -Gridinsoft Anti-Malware 4.0.5 activation code
      -Gridinsoft Anti-Malware 4.0.5 serial key
      -Gridinsoft Anti-Malware 4.0.5 cracked version download
      -Gridinsoft Anti-Malware 4.0.5 latest update
      -Gridinsoft Anti-Malware 4.0.5 system requirements
      -Gridinsoft Anti-Malware 4.0.5 installation guide
      -Gridinsoft Anti-Malware 4.0.5 comparison with other anti-malware software
      -Gridinsoft Anti-Malware 4.0.5 coupon code and discount
      -Gridinsoft Anti-Malware 4.0.5 trial version download
      -Gridinsoft Anti-Malware 4.0.5 pros and cons
      -Gridinsoft Anti-Malware 4.0.5 customer support and feedback
      -Gridinsoft Anti-Malware 4.0.5 malware removal tool
      -Gridinsoft Anti-Malware 4.0.5 best alternative
      -Gridinsoft Anti-Malware 4.0.5 for Windows 10/8/7
      -Gridinsoft Anti-Malware 4.0.5 for Mac OS X
      -Gridinsoft Anti-Malware 4.0.5 for Linux
      -Gridinsoft Anti-Malware 4.0.5 for Android
      -Gridinsoft Anti-Malware 4.0.5 for iOS
      -Gridinsoft Anti-Malware 4.0.5 for Chromebook
      -Gridinsoft Anti-Malware 4.0.5 online scan
      -Gridinsoft Anti-Malware 4.0.5 offline installer
      -Gridinsoft Anti-Malware 4.0.5 portable version
      -Gridinsoft Anti-Malware 4.0.5 lifetime license
      -Gridinsoft Anti-Malware 4.0.5 refund policy
      -Gridinsoft Anti-Malware 4.0.5 testimonials and ratings
      -Gridinsoft Anti-Malware 4.0.5 video tutorial and demo
      -Gridinsoft Anti-Malware 4.0.5 changelog and release notes
      -Gridinsoft Anti-Malware 4.0.5 FAQs and tips
      -Gridinsoft Anti-Malware 4.0.5 blog and news
      -Gridinsoft Anti-Malware 4.0.5 forum and community
      -Gridinsoft Anti-Malware 4.0.5 affiliate program and earnings
      -Gridinsoft Anti-Malware 4.0.5 giveaway and contest
      -Gridinsoft Anti-Malware 4

      -

      Crackingpatching also warns its users that using its patches may violate the license agreement or terms of service of the original software developers or vendors, and that they are solely responsible for any consequences or damages that may arise from using them.

      -

      Why do you need Gridinsoft Anti-Malware 4.0.5 Patch?

      -

      If you want to use Gridinsoft Anti-Malware without paying for a license or without any restrictions or limitations, you may need Gridinsoft Anti-Malware 4.0.5 Patch.

      -

      Gridinsoft Anti-Malware 4.0.5 Patch is a tool that can crack or activate the full version of Gridinsoft Anti-Malware 4.0.5 on your PC.

      -

      With this patch, you can enjoy all the features of Gridinsoft Anti-Malware 4.0.5 without any expiration date or trial period.

      -

      You can also update the software regularly without losing the activation status.

      -

      Features of Gridinsoft Anti-Malware 4.0.5 Patch

      -

      Gridinsoft Anti-Malware 4.0.5 Patch has some features that make it attractive for users who want to use Gridinsoft Anti-Malware for free.

      -

      Powerful scanning and detection engine

      -

      The patch does not affect the performance or quality of the scanning and detection engine of Gridinsoft Anti-Malware.

      -

      You can still scan your PC with different modes (standard scan, quick scan, full scan, removable scan) and find all kinds of malware on your system.

      -

      User-friendly and customizable interface

      -

      The patch does not change the appearance or functionality of the interface of Gridinsoft Anti-Malware.

      -

      You can still access all the options and settings of the software from the main window or the menu bar.

      -

      You can also customize the interface according to your preferences by changing the language, theme, font size, sound effects, etc.

      -

      Flexible and versatile settings

      -

      The patch does not limit or disable any settings or features of Gridinsoft Anti-Malware.

      -

      You can still adjust the scan parameters (such as file types, file size, heuristic rules), quarantine options (such as restore, delete), protection options (such as real-time protection), update options (such as automatic update), etc.

      -

      Comprehensive and detailed reports

      -

      The patch does not interfere with the generation or display of reports by Gridinsoft Anti-Malware.

      -

      You can still view the scan results (such as detected items, scan duration), quarantine history (such as quarantined items, restored items), log files (such as scan logs, update logs), etc.

      -

      How to install and use Gridinsoft Anti-Malware 4.0.5 Patch?

      -

      If you want to use Gridinsoft Anti-Malware 4.0.5 Patch, you need to follow these steps:

      -

      Download the patch from Crackingpatching website

      -

      The first step is to download the patch from Crackingpatching website. You can find it by searching for "Gridinsoft Anti-Malware 4.0.5 Patch" on their homepage or using this link:

      - https://crackingpatching.com/2018/08/gridinsoft-anti-malware-405-patch.html -

      The patch file is named "GridinSoft.AntiMalware.v4.x.Patch.zip" and has a size of about 1 MB. You need to extract it using a program like WinRAR or 7-Zip.

      -

      Install Gridinsoft Anti-Malware 4.0.5 on your PC

      -

      The next step is to install Gridinsoft Anti-Malware 4.0.5 on your PC. You can download it from their official website or using this link:

      - https://gridinsoft.com/download-antimalware/ -

      The installation file is named "gsam-405-setup.exe" and has a size of about 80 MB. You need to run it as administrator and follow the instructions on the screen. You can choose the installation folder, language, and shortcuts according to your preferences. You don't need to enter any license key or activate the software at this point.
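
      For readers who prefer to script this step, here is a minimal sketch in Python of the download-and-launch flow described above. It only automates fetching and starting the official installer; the direct file URL is a hypothetical placeholder, since the article links to Gridinsoft's download page rather than to the file itself, so the real link has to be taken from that page.

```python
# Sketch of the installation step described above (Windows, Python 3).
# The direct installer URL below is a hypothetical placeholder; the article
# only gives the download page https://gridinsoft.com/download-antimalware/.
import os
import tempfile
import urllib.request

installer_url = "https://gridinsoft.com/files/gsam-405-setup.exe"  # placeholder, not a confirmed link
installer_path = os.path.join(tempfile.gettempdir(), "gsam-405-setup.exe")

# Download the installer (about 80 MB according to the article).
urllib.request.urlretrieve(installer_url, installer_path)

# Open the installer as if double-clicked; its own UAC prompt handles the
# "run as administrator" step, after which you follow the on-screen wizard.
os.startfile(installer_path)
```

      This covers only the official installer; activation, whether with a purchased license or with the patch discussed below, is a separate step.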

      -

      Run the patch as administrator and apply it

      -

      The final step is to run the patch as administrator and apply it to crack or activate the full version of Gridinsoft Anti-Malware 4.0.5. You need to locate the patch file that you extracted earlier and run it as administrator. You will see a window like this:

      - Gridinsoft Anti-Malware 4.0.5 Patch window -

      You need to click on the "Patch" button and select the installation folder of Gridinsoft Anti-Malware 4.0.5 on your PC. The default folder is "C:\Program Files\GridinSoft Anti-Malware". You will see a message like this:

      - Gridinsoft Anti-Malware 4.0.5 Patch message -

      You need to click on "OK" and wait for the patching process to finish. You will see another message like this:

      - Gridinsoft Anti-Malware 4.0.5 Patch message -

      You need to click on "OK" again and close the patch window. You have successfully applied the patch to Gridinsoft Anti-Malware 4.0.5.

      -

      Enjoy the full version of Gridinsoft Anti-Malware

      -

      The last step is to enjoy the full version of Gridinsoft Anti-Malware 4.0.5 on your PC. You can launch the software from the desktop shortcut or the start menu and scan your PC for malware.

      -

      You will notice that the software is activated and does not ask for any license key or activation code. You will also see that all the features are unlocked and available for use.

      -

      You can update the software regularly without losing the activation status. You can also use the software without any expiration date or trial period.

      -

      Pros and cons of Gridinsoft Anti-Malware 4.0.5 Patch

      -

      Gridinsoft Anti-Malware 4.0.5 Patch has some pros and cons that you should consider before using it.

      -

      Pros

      -
      • Effective and reliable malware removal: The patch does not compromise the quality or performance of Gridinsoft Anti-Malware's scanning and detection engine. You can still remove various types of malware from your PC with ease and confidence.
      • Fast and easy to use: The patch is simple and straightforward to use. You just need to download, extract, run, and apply it in a few minutes. You don't need any technical skills or knowledge to use it.
      • Free to download and use: The patch is free to download and use from Crackingpatching website. You don't need to pay for a license or subscription to use Gridinsoft Anti-Malware 4.0.5.

      Cons

      -
      • May cause false positives or compatibility issues: The patch may alter some files or settings of Gridinsoft Anti-Malware that may cause false positives or compatibility issues with other software or hardware on your PC. You may need to whitelist or exclude some items from scanning or protection to avoid these problems.
      • May violate the license agreement of Gridinsoft Anti-Malware: The patch may violate the license agreement or terms of service of Gridinsoft Anti-Malware that you agreed to when you installed the software on your PC. You may lose your right to use the software legally or to receive technical support from the developers or vendors.
      • May expose your PC to security risks or legal troubles: The patch may expose your PC to security risks or legal troubles if you download or use it from an untrusted source, or because it modifies or bypasses the license verification or activation process of Gridinsoft Anti-Malware. You may get infected with malware, hacked, sued, fined, or arrested for using it.

      Conclusion

      -

      In conclusion, Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version is a tool that can crack or activate the full version of Gridinsoft Anti-Malware 4.0.5 on your PC for free.

      -

      It has some features that make it attractive for users who want to use Gridinsoft Anti-Malware without paying for a license or without any restrictions or limitations.

      -

      However, it also has some drawbacks that make it risky or illegal for users who want to use Gridinsoft Anti-Malware safely and legally.

      -

      Therefore, you should weigh the pros and cons of using this patch carefully before you decide to use it.

      -

      If you want to use Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version, you can follow these steps:

      -
      1. Download the patch from Crackingpatching website.
      2. Install Gridinsoft Anti-Malware 4.0.5 on your PC.
      3. Run the patch as administrator and apply it.
      4. Enjoy the full version of Gridinsoft Anti-Malware.

      If you want to use Gridinsoft Anti-Malware legally and safely, you can follow these steps:

      -
      1. Purchase a license from Gridinsoft website.
      2. Activate the software with your license key.
      3. Enjoy the full features of Gridinsoft Anti-Malware.

      Frequently Asked Questions

      -

      Here are some frequently asked questions about Gridinsoft Anti-Malware 4.0.5 Patch - Crackingpatching Full Version:

      -

      Is Gridinsoft Anti-Malware 4.0.5 Patch safe?

      -

      The safety of Gridinsoft Anti-Malware 4.0.5 Patch depends on where you download it from and how you use it.

      -

      If you download it from Crackingpatching website, which claims that its patches are tested and virus-free, you may not get infected with malware by downloading or using it.

      -

      However, if you download it from other sources that may be malicious or unreliable, you may get infected with malware by downloading or using it.

      -

      If you use it properly and carefully, following the instructions and warnings provided by Crackingpatching website, you may not encounter any problems or issues by using it.

      -

      However, if you use it improperly or carelessly, ignoring the instructions and warnings provided by Crackingpatching website, you may encounter some problems or issues by using it.

      -

      Is Gridinsoft Anti-Malware 4.0.5 Patch legal?

      -

      The legality of Gridinsoft Anti-Malware 4.0.5 Patch depends on where you live and how you use it.

      -

      If you live in a country or region that does not have strict laws or regulations regarding software piracy or intellectual property rights, you may not face any legal troubles by using it.

      -

      However, if you live in a country or region that has strict laws or regulations regarding software piracy or intellectual property rights, you may face some legal troubles by using it.

      -

      If you use it for personal or educational purposes only, without distributing or selling it to others, you may not face any legal troubles by using it.

      -

      However, if you use it for commercial or illegal purposes, such as distributing or selling it to others, you may face some legal troubles by using it.

      Does Gridinsoft Anti-Malware 4.0.5 Patch work?

      -

      The effectiveness of Gridinsoft Anti-Malware 4.0.5 Patch depends on the version and compatibility of Gridinsoft Anti-Malware and the patch.

      -

      If you use the patch for Gridinsoft Anti-Malware 4.0.5, which is the latest version as of writing this article, you may not have any problems or issues by using it.

      -

      However, if you use the patch for other versions of Gridinsoft Anti-Malware, which may be outdated or incompatible, you may have some problems or issues by using it.

      -

      Where can I get Gridinsoft Anti-Malware 4.0.5 Patch?

      -

      You can get Gridinsoft Anti-Malware 4.0.5 Patch from Crackingpatching website, which is the source of this article.

      -

      You can find it by searching for "Gridinsoft Anti-Malware 4.0.5 Patch" on their homepage or using this link:

      - https://crackingpatching.com/2018/08/gridinsoft-anti-malware-405-patch.html -

      You can also get Gridinsoft Anti-Malware 4.0.5 Patch from other sources that may provide similar patches, but you should be careful and cautious about their reliability and safety.

      -

      What are the alternatives to Gridinsoft Anti-Malware 4.0.5 Patch?

      -

      If you don't want to use Gridinsoft Anti-Malware 4.0.5 Patch for any reason, you have some alternatives to choose from.

      -

      One alternative is to purchase a license from Gridinsoft website and activate the software legally and safely.

      -

      Another alternative is to use other anti-malware software that may offer similar or better features and performance than Gridinsoft Anti-Malware.

      -

      Some examples of other anti-malware software are Malwarebytes, SpyHunter, Emsisoft, HitmanPro, etc.

      -


      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py deleted file mode 100644 index 3e356472fb5a27f370cb3920976a11d12a76c1b7..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/params_model.py +++ /dev/null @@ -1,11 +0,0 @@ - -## Model parameters -model_hidden_size = 256 -model_embedding_size = 256 -model_num_layers = 3 - - -## Training parameters -learning_rate_init = 1e-4 -speakers_per_batch = 64 -utterances_per_speaker = 10 diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md deleted file mode 100644 index 21b9ac4e6ac69398d2507c1e1b9088025ce8dc3e..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Calculo Vectorial De Moises Lazaro.md +++ /dev/null @@ -1,12 +0,0 @@ -

      Calculo Vectorial De Moises Lazaro


      Downloadhttps://urlgoal.com/2uCJB7



      -
      -384367663 350019104 Analisis Matematico II Moises Lazaro Carrion PDF - Free ... -This is a simple and easy to use app that will help you to learn how to make beautiful 3D illustrations. -It has lots of beautiful and simple images to use as reference. -Moreover, it has an option to change the color of the edges of each shape. -You can use it as your first step in creating awesome 3D illustrations. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/robinhad/ukrainian-stt/app.py b/spaces/robinhad/ukrainian-stt/app.py deleted file mode 100644 index 65687961bfa04a00bcba2ed6e20b7986d59f668f..0000000000000000000000000000000000000000 --- a/spaces/robinhad/ukrainian-stt/app.py +++ /dev/null @@ -1,114 +0,0 @@ -from io import BytesIO -from typing import Tuple -import wave -import gradio as gr -import numpy as np -from pydub.audio_segment import AudioSegment -import requests -from os.path import exists -from stt import Model -from datetime import datetime -from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor -import torch - -# download model -version = "v0.4" -storage_url = f"https://github.com/robinhad/voice-recognition-ua/releases/download/{version}" -model_name = "uk.tflite" -scorer_name = "kenlm.scorer" -model_link = f"{storage_url}/{model_name}" -scorer_link = f"{storage_url}/{scorer_name}" - -model = Wav2Vec2ForCTC.from_pretrained("robinhad/wav2vec2-xls-r-300m-uk")#.to("cuda") -processor = Wav2Vec2Processor.from_pretrained("robinhad/wav2vec2-xls-r-300m-uk") -# TODO: download config.json, pytorch_model.bin, preprocessor_config.json, tokenizer_config.json, vocab.json, added_tokens.json, special_tokens.json - -def download(url, file_name): - if not exists(file_name): - print(f"Downloading {file_name}") - r = requests.get(url, allow_redirects=True) - with open(file_name, 'wb') as file: - file.write(r.content) - else: - print(f"Found {file_name}. Skipping download...") - - -def deepspeech(audio: np.array, use_scorer=False): - ds = Model(model_name) - if use_scorer: - ds.enableExternalScorer("kenlm.scorer") - - result = ds.stt(audio) - - return result - -def wav2vec2(audio: np.array): - input_dict = processor(audio, sampling_rate=16000, return_tensors="pt", padding=True) - with torch.no_grad(): - output = model(input_dict.input_values.float()) - - logits = output.logits - - pred_ids = torch.argmax(logits, dim=-1)[0] - - return processor.decode(pred_ids) - -def inference(audio: Tuple[int, np.array]): - print("=============================") - print(f"Time: {datetime.utcnow()}.`") - - output_audio = _convert_audio(audio[1], audio[0]) - - fin = wave.open(output_audio, 'rb') - audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16) - fin.close() - - transcripts = [] - - transcripts.append(wav2vec2(audio)) - print(f"Wav2Vec2: `{transcripts[-1]}`") - transcripts.append(deepspeech(audio, use_scorer=True)) - print(f"Deepspeech with LM: `{transcripts[-1]}`") - transcripts.append(deepspeech(audio)) - print(f"Deepspeech: `{transcripts[-1]}`") - return tuple(transcripts) - - -def _convert_audio(audio_data: np.array, sample_rate: int): - audio_limit = sample_rate * 60 * 2 # limit audio to 2 minutes max - if audio_data.shape[0] > audio_limit: - audio_data = audio_data[0:audio_limit] - source_audio = BytesIO() - source_audio.write(audio_data) - source_audio.seek(0) - output_audio = BytesIO() - wav_file: AudioSegment = AudioSegment.from_raw( - source_audio, - channels=1, - sample_width=audio_data.dtype.itemsize, - frame_rate=sample_rate - ) - wav_file.export(output_audio, "wav", codec="pcm_s16le", parameters=["-ar", "16k"]) - output_audio.seek(0) - return output_audio - -with open("README.md") as file: - article = file.read() - article = article[article.find("---\n", 4) + 5::] - -iface = gr.Interface( - fn=inference, - inputs=[ - gr.inputs.Audio(type="numpy", - label="Аудіо", optional=False), - ], - outputs=[gr.outputs.Textbox(label="Wav2Vec2"), gr.outputs.Textbox(label="DeepSpeech with LM"), 
gr.outputs.Textbox(label="DeepSpeech")], - title="🇺🇦 Ukrainian Speech-to-Text models", - theme="huggingface", - description="Україномовний🇺🇦 Speech-to-Text за допомогою Coqui STT", - article=article, -) - -download(model_link, model_name) -download(scorer_link, scorer_name) -iface.launch() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/yolox_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/yolox_head.py deleted file mode 100644 index f317e14760b2948609309016e6b4a87eae2e26a8..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/yolox_head.py +++ /dev/null @@ -1,493 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, - bias_init_with_prob) -from mmcv.ops.nms import batched_nms -from mmcv.runner import force_fp32 - -from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh, - build_assigner, build_sampler, multi_apply, - reduce_mean) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class YOLOXHead(BaseDenseHead, BBoxTestMixin): - """YOLOXHead head used in `YOLOX `_. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels in stacking convs. - Default: 256 - stacked_convs (int): Number of stacking convs of the head. - Default: 2. - strides (tuple): Downsample factor of each feature map. - use_depthwise (bool): Whether to depthwise separable convolution in - blocks. Default: False - dcn_on_last_conv (bool): If true, use dcn in the last layer of - towers. Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Default: "auto". - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (dict): Config dict for activation layer. Default: None. - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - loss_obj (dict): Config of objectness loss. - loss_l1 (dict): Config of L1 loss. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=2, - strides=[8, 16, 32], - use_depthwise=False, - dcn_on_last_conv=False, - conv_bias='auto', - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_bbox=dict( - type='IoULoss', - mode='square', - eps=1e-16, - reduction='sum', - loss_weight=5.0), - loss_obj=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Kaiming', - layer='Conv2d', - a=math.sqrt(5), - distribution='uniform', - mode='fan_in', - nonlinearity='leaky_relu')): - - super().__init__(init_cfg=init_cfg) - self.num_classes = num_classes - self.cls_out_channels = num_classes - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.use_depthwise = use_depthwise - self.dcn_on_last_conv = dcn_on_last_conv - assert conv_bias == 'auto' or isinstance(conv_bias, bool) - self.conv_bias = conv_bias - self.use_sigmoid_cls = True - - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.loss_obj = build_loss(loss_obj) - - self.use_l1 = False # This flag will be modified by hooks. - self.loss_l1 = build_loss(loss_l1) - - self.prior_generator = MlvlPointGenerator(strides, offset=0) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.fp16_enabled = False - self._init_layers() - - def _init_layers(self): - self.multi_level_cls_convs = nn.ModuleList() - self.multi_level_reg_convs = nn.ModuleList() - self.multi_level_conv_cls = nn.ModuleList() - self.multi_level_conv_reg = nn.ModuleList() - self.multi_level_conv_obj = nn.ModuleList() - for _ in self.strides: - self.multi_level_cls_convs.append(self._build_stacked_convs()) - self.multi_level_reg_convs.append(self._build_stacked_convs()) - conv_cls, conv_reg, conv_obj = self._build_predictor() - self.multi_level_conv_cls.append(conv_cls) - self.multi_level_conv_reg.append(conv_reg) - self.multi_level_conv_obj.append(conv_obj) - - def _build_stacked_convs(self): - """Initialize conv layers of a single level head.""" - conv = DepthwiseSeparableConvModule \ - if self.use_depthwise else ConvModule - stacked_convs = [] - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - stacked_convs.append( - conv( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - bias=self.conv_bias)) - return nn.Sequential(*stacked_convs) - - def _build_predictor(self): - """Initialize predictor layers of a single level head.""" - conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) - conv_reg = nn.Conv2d(self.feat_channels, 4, 1) - conv_obj = nn.Conv2d(self.feat_channels, 1, 1) - return conv_cls, conv_reg, 
conv_obj - - def init_weights(self): - super(YOLOXHead, self).init_weights() - # Use prior in model initialization to improve stability - bias_init = bias_init_with_prob(0.01) - for conv_cls, conv_obj in zip(self.multi_level_conv_cls, - self.multi_level_conv_obj): - conv_cls.bias.data.fill_(bias_init) - conv_obj.bias.data.fill_(bias_init) - - def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, - conv_obj): - """Forward feature of a single scale level.""" - - cls_feat = cls_convs(x) - reg_feat = reg_convs(x) - - cls_score = conv_cls(cls_feat) - bbox_pred = conv_reg(reg_feat) - objectness = conv_obj(reg_feat) - - return cls_score, bbox_pred, objectness - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - Returns: - tuple[Tensor]: A tuple of multi-level predication map, each is a - 4D-tensor of shape (batch_size, 5+num_classes, height, width). - """ - - return multi_apply(self.forward_single, feats, - self.multi_level_cls_convs, - self.multi_level_reg_convs, - self.multi_level_conv_cls, - self.multi_level_conv_reg, - self.multi_level_conv_obj) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) - def get_bboxes(self, - cls_scores, - bbox_preds, - objectnesses, - img_metas=None, - cfg=None, - rescale=False, - with_nms=True): - """Transform network outputs of a batch into bbox results. - Args: - cls_scores (list[Tensor]): Classification scores for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * 4, H, W). - objectnesses (list[Tensor], Optional): Score factor for - all scale level, each is a 4D-tensor, has shape - (batch_size, 1, H, W). - img_metas (list[dict], Optional): Image meta info. Default None. - cfg (mmcv.Config, Optional): Test / postprocessing configuration, - if None, test_cfg would be used. Default None. - rescale (bool): If True, return boxes in original image space. - Default False. - with_nms (bool): If True, do nms before return boxes. - Default True. - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class label of - the corresponding box. 
- """ - assert len(cls_scores) == len(bbox_preds) == len(objectnesses) - cfg = self.test_cfg if cfg is None else cfg - scale_factors = np.array( - [img_meta['scale_factor'] for img_meta in img_metas]) - - num_imgs = len(img_metas) - featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device, - with_stride=True) - - # flatten cls_scores, bbox_preds and objectness - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_objectness = [ - objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) - for objectness in objectnesses - ] - - flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() - flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) - flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() - flatten_priors = torch.cat(mlvl_priors) - - flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) - - if rescale: - flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( - scale_factors).unsqueeze(1) - - result_list = [] - for img_id in range(len(img_metas)): - cls_scores = flatten_cls_scores[img_id] - score_factor = flatten_objectness[img_id] - bboxes = flatten_bboxes[img_id] - - result_list.append( - self._bboxes_nms(cls_scores, bboxes, score_factor, cfg)) - - return result_list - - def _bbox_decode(self, priors, bbox_preds): - xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] - whs = bbox_preds[..., 2:].exp() * priors[:, 2:] - - tl_x = (xys[..., 0] - whs[..., 0] / 2) - tl_y = (xys[..., 1] - whs[..., 1] / 2) - br_x = (xys[..., 0] + whs[..., 0] / 2) - br_y = (xys[..., 1] + whs[..., 1] / 2) - - decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) - return decoded_bboxes - - def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg): - max_scores, labels = torch.max(cls_scores, 1) - valid_mask = score_factor * max_scores >= cfg.score_thr - - bboxes = bboxes[valid_mask] - scores = max_scores[valid_mask] * score_factor[valid_mask] - labels = labels[valid_mask] - - if labels.numel() == 0: - return bboxes, labels - else: - dets, keep = batched_nms(bboxes, scores, labels, cfg.nms) - return dets, labels[keep] - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) - def loss(self, - cls_scores, - bbox_preds, - objectnesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_priors * 4. - objectnesses (list[Tensor], Optional): Score factor for - all scale level, each is a 4D-tensor, has shape - (batch_size, 1, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. 
- """ - num_imgs = len(img_metas) - featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device, - with_stride=True) - - flatten_cls_preds = [ - cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.cls_out_channels) - for cls_pred in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_objectness = [ - objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) - for objectness in objectnesses - ] - - flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) - flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) - flatten_objectness = torch.cat(flatten_objectness, dim=1) - flatten_priors = torch.cat(mlvl_priors) - flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) - - (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, - num_fg_imgs) = multi_apply( - self._get_target_single, flatten_cls_preds.detach(), - flatten_objectness.detach(), - flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), - flatten_bboxes.detach(), gt_bboxes, gt_labels) - - # The experimental results show that ‘reduce_mean’ can improve - # performance on the COCO dataset. - num_pos = torch.tensor( - sum(num_fg_imgs), - dtype=torch.float, - device=flatten_cls_preds.device) - num_total_samples = max(reduce_mean(num_pos), 1.0) - - pos_masks = torch.cat(pos_masks, 0) - cls_targets = torch.cat(cls_targets, 0) - obj_targets = torch.cat(obj_targets, 0) - bbox_targets = torch.cat(bbox_targets, 0) - if self.use_l1: - l1_targets = torch.cat(l1_targets, 0) - - loss_bbox = self.loss_bbox( - flatten_bboxes.view(-1, 4)[pos_masks], - bbox_targets) / num_total_samples - loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), - obj_targets) / num_total_samples - loss_cls = self.loss_cls( - flatten_cls_preds.view(-1, self.num_classes)[pos_masks], - cls_targets) / num_total_samples - - loss_dict = dict( - loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) - - if self.use_l1: - loss_l1 = self.loss_l1( - flatten_bbox_preds.view(-1, 4)[pos_masks], - l1_targets) / num_total_samples - loss_dict.update(loss_l1=loss_l1) - - return loss_dict - - @torch.no_grad() - def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, - gt_bboxes, gt_labels): - """Compute classification, regression, and objectness targets for - priors in a single image. - Args: - cls_preds (Tensor): Classification predictions of one image, - a 2D-Tensor with shape [num_priors, num_classes] - objectness (Tensor): Objectness predictions of one image, - a 1D-Tensor with shape [num_priors] - priors (Tensor): All priors of one image, a 2D-Tensor with shape - [num_priors, 4] in [cx, xy, stride_w, stride_y] format. - decoded_bboxes (Tensor): Decoded bboxes predictions of one image, - a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, - br_x, br_y] format. - gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor - with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth labels of one image, a Tensor - with shape [num_gts]. 
- """ - - num_priors = priors.size(0) - num_gts = gt_labels.size(0) - gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) - # No target - if num_gts == 0: - cls_target = cls_preds.new_zeros((0, self.num_classes)) - bbox_target = cls_preds.new_zeros((0, 4)) - l1_target = cls_preds.new_zeros((0, 4)) - obj_target = cls_preds.new_zeros((num_priors, 1)) - foreground_mask = cls_preds.new_zeros(num_priors).bool() - return (foreground_mask, cls_target, obj_target, bbox_target, - l1_target, 0) - - # YOLOX uses center priors with 0.5 offset to assign targets, - # but use center priors without offset to regress bboxes. - offset_priors = torch.cat( - [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) - - assign_result = self.assigner.assign( - cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(), - offset_priors, decoded_bboxes, gt_bboxes, gt_labels) - - sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes) - pos_inds = sampling_result.pos_inds - num_pos_per_img = pos_inds.size(0) - - pos_ious = assign_result.max_overlaps[pos_inds] - # IOU aware classification score - cls_target = F.one_hot(sampling_result.pos_gt_labels, - self.num_classes) * pos_ious.unsqueeze(-1) - obj_target = torch.zeros_like(objectness).unsqueeze(-1) - obj_target[pos_inds] = 1 - bbox_target = sampling_result.pos_gt_bboxes - l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) - if self.use_l1: - l1_target = self._get_l1_target(l1_target, bbox_target, - priors[pos_inds]) - foreground_mask = torch.zeros_like(objectness).to(torch.bool) - foreground_mask[pos_inds] = 1 - return (foreground_mask, cls_target, obj_target, bbox_target, - l1_target, num_pos_per_img) - - def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8): - """Convert gt bboxes to center offset and log width height.""" - gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) - l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] - l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) - return l1_target diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/sabl_head.py deleted file mode 100644 index 0ce986b9a29ed2264e48ac4df89b407dfc66eeca..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/bbox_heads/sabl_head.py +++ /dev/null @@ -1,596 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, force_fp32 - -from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.losses import accuracy - - -@HEADS.register_module() -class SABLHead(BaseModule): - """Side-Aware Boundary Localization (SABL) for RoI-Head. - - Side-Aware features are extracted by conv layers - with an attention mechanism. - Boundary Localization with Bucketing and Bucketing Guided Rescoring - are implemented in BucketingBBoxCoder. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - cls_in_channels (int): Input channels of cls RoI feature. \ - Defaults to 256. - reg_in_channels (int): Input channels of reg RoI feature. \ - Defaults to 256. - roi_feat_size (int): Size of RoI features. Defaults to 7. - reg_feat_up_ratio (int): Upsample ratio of reg features. \ - Defaults to 2. 
- reg_pre_kernel (int): Kernel of 2D conv layers before \ - attention pooling. Defaults to 3. - reg_post_kernel (int): Kernel of 1D conv layers after \ - attention pooling. Defaults to 3. - reg_pre_num (int): Number of pre convs. Defaults to 2. - reg_post_num (int): Number of post convs. Defaults to 1. - num_classes (int): Number of classes in dataset. Defaults to 80. - cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. - reg_offset_out_channels (int): Hidden and output channel \ - of reg offset branch. Defaults to 256. - reg_cls_out_channels (int): Hidden and output channel \ - of reg cls branch. Defaults to 256. - num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. - num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. - reg_class_agnostic (bool): Class agnostic regression or not. \ - Defaults to True. - norm_cfg (dict): Config of norm layers. Defaults to None. - bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. - loss_cls (dict): Config of classification loss. - loss_bbox_cls (dict): Config of classification loss for bbox branch. - loss_bbox_reg (dict): Config of regression loss for bbox branch. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - num_classes, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', - num_buckets=14, - scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=0.1, loss_weight=1.0), - init_cfg=None): - super(SABLHead, self).__init__(init_cfg) - self.cls_in_channels = cls_in_channels - self.reg_in_channels = reg_in_channels - self.roi_feat_size = roi_feat_size - self.reg_feat_up_ratio = int(reg_feat_up_ratio) - self.num_buckets = bbox_coder['num_buckets'] - assert self.reg_feat_up_ratio // 2 >= 1 - self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio - assert self.up_reg_feat_size == bbox_coder['num_buckets'] - self.reg_pre_kernel = reg_pre_kernel - self.reg_post_kernel = reg_post_kernel - self.reg_pre_num = reg_pre_num - self.reg_post_num = reg_post_num - self.num_classes = num_classes - self.cls_out_channels = cls_out_channels - self.reg_offset_out_channels = reg_offset_out_channels - self.reg_cls_out_channels = reg_cls_out_channels - self.num_cls_fcs = num_cls_fcs - self.num_reg_fcs = num_reg_fcs - self.reg_class_agnostic = reg_class_agnostic - assert self.reg_class_agnostic - self.norm_cfg = norm_cfg - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox_cls = build_loss(loss_bbox_cls) - self.loss_bbox_reg = build_loss(loss_bbox_reg) - - self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, - self.cls_in_channels, - self.roi_feat_size, - self.cls_out_channels) - - self.side_num = int(np.ceil(self.num_buckets / 2)) - - if self.reg_feat_up_ratio > 1: - self.upsample_x = nn.ConvTranspose1d( - reg_in_channels, - reg_in_channels, - self.reg_feat_up_ratio, - stride=self.reg_feat_up_ratio) - self.upsample_y = nn.ConvTranspose1d( - reg_in_channels, - reg_in_channels, - 
self.reg_feat_up_ratio, - stride=self.reg_feat_up_ratio) - - self.reg_pre_convs = nn.ModuleList() - for i in range(self.reg_pre_num): - reg_pre_conv = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=reg_pre_kernel, - padding=reg_pre_kernel // 2, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_pre_convs.append(reg_pre_conv) - - self.reg_post_conv_xs = nn.ModuleList() - for i in range(self.reg_post_num): - reg_post_conv_x = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=(1, reg_post_kernel), - padding=(0, reg_post_kernel // 2), - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_post_conv_xs.append(reg_post_conv_x) - self.reg_post_conv_ys = nn.ModuleList() - for i in range(self.reg_post_num): - reg_post_conv_y = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=(reg_post_kernel, 1), - padding=(reg_post_kernel // 2, 0), - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_post_conv_ys.append(reg_post_conv_y) - - self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) - self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) - - self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) - self.relu = nn.ReLU(inplace=True) - - self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, - self.reg_in_channels, 1, - self.reg_cls_out_channels) - self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, - self.reg_in_channels, 1, - self.reg_offset_out_channels) - self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) - self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) - - if init_cfg is None: - self.init_cfg = [ - dict( - type='Xavier', - layer='Linear', - distribution='uniform', - override=[ - dict(type='Normal', name='reg_conv_att_x', std=0.01), - dict(type='Normal', name='reg_conv_att_y', std=0.01), - dict(type='Normal', name='fc_reg_cls', std=0.01), - dict(type='Normal', name='fc_cls', std=0.01), - dict(type='Normal', name='fc_reg_offset', std=0.001) - ]) - ] - if self.reg_feat_up_ratio > 1: - self.init_cfg += [ - dict( - type='Kaiming', - distribution='normal', - override=[ - dict(name='upsample_x'), - dict(name='upsample_y') - ]) - ] - - @property - def custom_cls_channels(self): - return getattr(self.loss_cls, 'custom_cls_channels', False) - - @property - def custom_activation(self): - return getattr(self.loss_cls, 'custom_activation', False) - - @property - def custom_accuracy(self): - return getattr(self.loss_cls, 'custom_accuracy', False) - - def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, - fc_out_channels): - in_channels = in_channels * roi_feat_size * roi_feat_size - branch_fcs = nn.ModuleList() - for i in range(num_branch_fcs): - fc_in_channels = (in_channels if i == 0 else fc_out_channels) - branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) - return branch_fcs - - def cls_forward(self, cls_x): - cls_x = cls_x.view(cls_x.size(0), -1) - for fc in self.cls_fcs: - cls_x = self.relu(fc(cls_x)) - cls_score = self.fc_cls(cls_x) - return cls_score - - def attention_pool(self, reg_x): - """Extract direction-specific features fx and fy with attention - methanism.""" - reg_fx = reg_x - reg_fy = reg_x - reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() - reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() - reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) - reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) - reg_fx = (reg_fx * reg_fx_att).sum(dim=2) - reg_fy = (reg_fy * reg_fy_att).sum(dim=3) - return reg_fx, reg_fy - - def 
side_aware_feature_extractor(self, reg_x): - """Refine and extract side-aware features without split them.""" - for reg_pre_conv in self.reg_pre_convs: - reg_x = reg_pre_conv(reg_x) - reg_fx, reg_fy = self.attention_pool(reg_x) - - if self.reg_post_num > 0: - reg_fx = reg_fx.unsqueeze(2) - reg_fy = reg_fy.unsqueeze(3) - for i in range(self.reg_post_num): - reg_fx = self.reg_post_conv_xs[i](reg_fx) - reg_fy = self.reg_post_conv_ys[i](reg_fy) - reg_fx = reg_fx.squeeze(2) - reg_fy = reg_fy.squeeze(3) - if self.reg_feat_up_ratio > 1: - reg_fx = self.relu(self.upsample_x(reg_fx)) - reg_fy = self.relu(self.upsample_y(reg_fy)) - reg_fx = torch.transpose(reg_fx, 1, 2) - reg_fy = torch.transpose(reg_fy, 1, 2) - return reg_fx.contiguous(), reg_fy.contiguous() - - def reg_pred(self, x, offset_fcs, cls_fcs): - """Predict bucketing estimation (cls_pred) and fine regression (offset - pred) with side-aware features.""" - x_offset = x.view(-1, self.reg_in_channels) - x_cls = x.view(-1, self.reg_in_channels) - - for fc in offset_fcs: - x_offset = self.relu(fc(x_offset)) - for fc in cls_fcs: - x_cls = self.relu(fc(x_cls)) - offset_pred = self.fc_reg_offset(x_offset) - cls_pred = self.fc_reg_cls(x_cls) - - offset_pred = offset_pred.view(x.size(0), -1) - cls_pred = cls_pred.view(x.size(0), -1) - - return offset_pred, cls_pred - - def side_aware_split(self, feat): - """Split side-aware features aligned with orders of bucketing - targets.""" - l_end = int(np.ceil(self.up_reg_feat_size / 2)) - r_start = int(np.floor(self.up_reg_feat_size / 2)) - feat_fl = feat[:, :l_end] - feat_fr = feat[:, r_start:].flip(dims=(1, )) - feat_fl = feat_fl.contiguous() - feat_fr = feat_fr.contiguous() - feat = torch.cat([feat_fl, feat_fr], dim=-1) - return feat - - def bbox_pred_split(self, bbox_pred, num_proposals_per_img): - """Split batch bbox prediction back to each image.""" - bucket_cls_preds, bucket_offset_preds = bbox_pred - bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) - bucket_offset_preds = bucket_offset_preds.split( - num_proposals_per_img, 0) - bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) - return bbox_pred - - def reg_forward(self, reg_x): - outs = self.side_aware_feature_extractor(reg_x) - edge_offset_preds = [] - edge_cls_preds = [] - reg_fx = outs[0] - reg_fy = outs[1] - offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, - self.reg_cls_fcs) - offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, - self.reg_cls_fcs) - offset_pred_x = self.side_aware_split(offset_pred_x) - offset_pred_y = self.side_aware_split(offset_pred_y) - cls_pred_x = self.side_aware_split(cls_pred_x) - cls_pred_y = self.side_aware_split(cls_pred_y) - edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) - edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) - - return (edge_cls_preds, edge_offset_preds) - - def forward(self, x): - - bbox_pred = self.reg_forward(x) - cls_score = self.cls_forward(x) - - return cls_score, bbox_pred - - def get_targets(self, sampling_results, gt_bboxes, gt_labels, - rcnn_train_cfg): - pos_proposals = [res.pos_bboxes for res in sampling_results] - neg_proposals = [res.neg_bboxes for res in sampling_results] - pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels = [res.pos_gt_labels for res in sampling_results] - cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, - pos_gt_bboxes, pos_gt_labels, - rcnn_train_cfg) - (labels, label_weights, bucket_cls_targets, bucket_cls_weights, 
- bucket_offset_targets, bucket_offset_weights) = cls_reg_targets - return (labels, label_weights, (bucket_cls_targets, - bucket_offset_targets), - (bucket_cls_weights, bucket_offset_weights)) - - def bucket_target(self, - pos_proposals_list, - neg_proposals_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - rcnn_train_cfg, - concat=True): - (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) = multi_apply( - self._bucket_target_single, - pos_proposals_list, - neg_proposals_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bucket_cls_targets = torch.cat(bucket_cls_targets, 0) - bucket_cls_weights = torch.cat(bucket_cls_weights, 0) - bucket_offset_targets = torch.cat(bucket_offset_targets, 0) - bucket_offset_weights = torch.cat(bucket_offset_weights, 0) - return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) - - def _bucket_target_single(self, pos_proposals, neg_proposals, - pos_gt_bboxes, pos_gt_labels, cfg): - """Compute bucketing estimation targets and fine regression targets for - a single image. - - Args: - pos_proposals (Tensor): positive proposals of a single image, - Shape (n_pos, 4) - neg_proposals (Tensor): negative proposals of a single image, - Shape (n_neg, 4). - pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals - of a single image, Shape (n_pos, 4). - pos_gt_labels (Tensor): gt labels assigned to positive proposals - of a single image, Shape (n_pos, ). - cfg (dict): Config of calculating targets - - Returns: - tuple: - - - labels (Tensor): Labels in a single image. \ - Shape (n,). - - label_weights (Tensor): Label weights in a single image.\ - Shape (n,) - - bucket_cls_targets (Tensor): Bucket cls targets in \ - a single image. Shape (n, num_buckets*2). - - bucket_cls_weights (Tensor): Bucket cls weights in \ - a single image. Shape (n, num_buckets*2). - - bucket_offset_targets (Tensor): Bucket offset targets \ - in a single image. Shape (n, num_buckets*2). - - bucket_offset_targets (Tensor): Bucket offset weights \ - in a single image. Shape (n, num_buckets*2). 
- """ - num_pos = pos_proposals.size(0) - num_neg = neg_proposals.size(0) - num_samples = num_pos + num_neg - labels = pos_gt_bboxes.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_proposals.new_zeros(num_samples) - bucket_cls_targets = pos_proposals.new_zeros(num_samples, - 4 * self.side_num) - bucket_cls_weights = pos_proposals.new_zeros(num_samples, - 4 * self.side_num) - bucket_offset_targets = pos_proposals.new_zeros( - num_samples, 4 * self.side_num) - bucket_offset_weights = pos_proposals.new_zeros( - num_samples, 4 * self.side_num) - if num_pos > 0: - labels[:num_pos] = pos_gt_labels - label_weights[:num_pos] = 1.0 - (pos_bucket_offset_targets, pos_bucket_offset_weights, - pos_bucket_cls_targets, - pos_bucket_cls_weights) = self.bbox_coder.encode( - pos_proposals, pos_gt_bboxes) - bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets - bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights - bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets - bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights - if num_neg > 0: - label_weights[-num_neg:] = 1.0 - return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) - - def loss(self, - cls_score, - bbox_pred, - rois, - labels, - label_weights, - bbox_targets, - bbox_weights, - reduction_override=None): - losses = dict() - if cls_score is not None: - avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) - losses['loss_cls'] = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - losses['acc'] = accuracy(cls_score, labels) - - if bbox_pred is not None: - bucket_cls_preds, bucket_offset_preds = bbox_pred - bucket_cls_targets, bucket_offset_targets = bbox_targets - bucket_cls_weights, bucket_offset_weights = bbox_weights - # edge cls - bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) - bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) - bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) - losses['loss_bbox_cls'] = self.loss_bbox_cls( - bucket_cls_preds, - bucket_cls_targets, - bucket_cls_weights, - avg_factor=bucket_cls_targets.size(0), - reduction_override=reduction_override) - - losses['loss_bbox_reg'] = self.loss_bbox_reg( - bucket_offset_preds, - bucket_offset_targets, - bucket_offset_weights, - avg_factor=bucket_offset_targets.size(0), - reduction_override=reduction_override) - - return losses - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def get_bboxes(self, - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False, - cfg=None): - if isinstance(cls_score, list): - cls_score = sum(cls_score) / float(len(cls_score)) - scores = F.softmax(cls_score, dim=1) if cls_score is not None else None - - if bbox_pred is not None: - bboxes, confidences = self.bbox_coder.decode( - rois[:, 1:], bbox_pred, img_shape) - else: - bboxes = rois[:, 1:].clone() - confidences = None - if img_shape is not None: - bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) - bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) - - if rescale and bboxes.size(0) > 0: - if isinstance(scale_factor, float): - bboxes /= scale_factor - else: - bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) - - if cfg is None: - return bboxes, scores - else: - det_bboxes, det_labels = multiclass_nms( - bboxes, - scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=confidences) - - return 
det_bboxes, det_labels - - @force_fp32(apply_to=('bbox_preds', )) - def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): - """Refine bboxes during training. - - Args: - rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, - and bs is the sampled RoIs per image. - labels (Tensor): Shape (n*bs, ). - bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ - (n*bs, num_buckets*2)]. - pos_is_gts (list[Tensor]): Flags indicating if each positive bbox - is a gt bbox. - img_metas (list[dict]): Meta info of each image. - - Returns: - list[Tensor]: Refined bboxes of each image in a mini-batch. - """ - img_ids = rois[:, 0].long().unique(sorted=True) - assert img_ids.numel() == len(img_metas) - - bboxes_list = [] - for i in range(len(img_metas)): - inds = torch.nonzero( - rois[:, 0] == i, as_tuple=False).squeeze(dim=1) - num_rois = inds.numel() - - bboxes_ = rois[inds, 1:] - label_ = labels[inds] - edge_cls_preds, edge_offset_preds = bbox_preds - edge_cls_preds_ = edge_cls_preds[inds] - edge_offset_preds_ = edge_offset_preds[inds] - bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] - img_meta_ = img_metas[i] - pos_is_gts_ = pos_is_gts[i] - - bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, - img_meta_) - # filter gt bboxes - pos_keep = 1 - pos_is_gts_ - keep_inds = pos_is_gts_.new_ones(num_rois) - keep_inds[:len(pos_is_gts_)] = pos_keep - - bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) - - return bboxes_list - - @force_fp32(apply_to=('bbox_pred', )) - def regress_by_class(self, rois, label, bbox_pred, img_meta): - """Regress the bbox for the predicted class. Used in Cascade R-CNN. - - Args: - rois (Tensor): shape (n, 4) or (n, 5) - label (Tensor): shape (n, ) - bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ - (n, num_buckets *2)] - img_meta (dict): Image meta info. - - Returns: - Tensor: Regressed bboxes, the same shape as input rois. 
- """ - assert rois.size(1) == 4 or rois.size(1) == 5 - - if rois.size(1) == 4: - new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, - img_meta['img_shape']) - else: - bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, - img_meta['img_shape']) - new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) - - return new_rois diff --git a/spaces/rohan13/coursera-qa-bot/utils_old.py b/spaces/rohan13/coursera-qa-bot/utils_old.py deleted file mode 100644 index 68a16dfbc2ed6762b8645fe94abde768290b7b53..0000000000000000000000000000000000000000 --- a/spaces/rohan13/coursera-qa-bot/utils_old.py +++ /dev/null @@ -1,271 +0,0 @@ -import os -import pickle -import re -from typing import List, Union - -import faiss -from langchain import OpenAI, LLMChain -from langchain.agents import ConversationalAgent -from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser -from langchain.chains import ConversationalRetrievalChain -from langchain.document_loaders import DirectoryLoader, TextLoader, UnstructuredHTMLLoader -from langchain.embeddings import OpenAIEmbeddings -from langchain.memory import ConversationBufferWindowMemory -from langchain.prompts import BaseChatPromptTemplate -from langchain.schema import AgentAction, AgentFinish, HumanMessage -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores.faiss import FAISS - -os.environ['OPENAI_API_KEY'] = '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' - -pickle_file = "open_ai.pkl" -index_file = "open_ai.index" - -gpt_3_5 = OpenAI(model_name='gpt-4',temperature=0) - -embeddings = OpenAIEmbeddings() - -chat_history = [] - -memory = ConversationBufferWindowMemory(memory_key="chat_history") - -gpt_3_5_index = None - -class CustomOutputParser(AgentOutputParser): - - def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: - # Check if agent replied without using tools - if "AI:" in llm_output: - return AgentFinish(return_values={"output": llm_output.split("AI:")[-1].strip()}, - log=llm_output) - # Check if agent should finish - if "Final Answer:" in llm_output: - return AgentFinish( - # Return values is generally always a dictionary with a single `output` key - # It is not recommended to try anything else at the moment :) - return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, - log=llm_output, - ) - # Parse out the action and action input - regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)" - match = re.search(regex, llm_output, re.DOTALL) - if not match: - raise ValueError(f"Could not parse LLM output: `{llm_output}`") - action = match.group(1).strip() - action_input = match.group(2) - # Return the action and action input - return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) - -# Set up a prompt template -class CustomPromptTemplate(BaseChatPromptTemplate): - # The template to use - template: str - # The list of tools available - tools: List[Tool] - - def format_messages(self, **kwargs) -> str: - # Get the intermediate steps (AgentAction, Observation tuples) - # Format them in a particular way - intermediate_steps = kwargs.pop("intermediate_steps") - thoughts = "" - for action, observation in intermediate_steps: - thoughts += action.log - thoughts += f"\nObservation: {observation}\nThought: " - # Set the agent_scratchpad variable to that value - kwargs["agent_scratchpad"] = thoughts - # Create a tools variable from the list of tools provided - kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in 
self.tools]) - # Create a list of tool names for the tools provided - kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools]) - formatted = self.template.format(**kwargs) - return [HumanMessage(content=formatted)] - -def get_search_index(): - global gpt_3_5_index - if os.path.isfile(pickle_file) and os.path.isfile(index_file) and os.path.getsize(pickle_file) > 0: - # Load index from pickle file - with open(pickle_file, "rb") as f: - search_index = pickle.load(f) - else: - search_index = create_index() - - gpt_3_5_index = search_index - - -def create_index(): - source_chunks = create_chunk_documents() - search_index = search_index_from_docs(source_chunks) - faiss.write_index(search_index.index, index_file) - # Save index to pickle file - with open(pickle_file, "wb") as f: - pickle.dump(search_index, f) - return search_index - - -def search_index_from_docs(source_chunks): - # print("source chunks: " + str(len(source_chunks))) - # print("embeddings: " + str(embeddings)) - search_index = FAISS.from_documents(source_chunks, embeddings) - return search_index - - -def get_html_files(): - loader = DirectoryLoader('docs', glob="**/*.html", loader_cls=UnstructuredHTMLLoader, recursive=True) - document_list = loader.load() - return document_list - - -def fetch_data_for_embeddings(): - document_list = get_text_files() - document_list.extend(get_html_files()) - print("document list" + str(len(document_list))) - return document_list - - -def get_text_files(): - loader = DirectoryLoader('docs', glob="**/*.txt", loader_cls=TextLoader, recursive=True) - document_list = loader.load() - return document_list - - -def create_chunk_documents(): - sources = fetch_data_for_embeddings() - - splitter = CharacterTextSplitter(separator=" ", chunk_size=800, chunk_overlap=0) - - source_chunks = splitter.split_documents(sources) - - print("sources" + str(len(source_chunks))) - - return source_chunks - - -def get_qa_chain(gpt_3_5_index): - global gpt_3_5 - return ConversationalRetrievalChain.from_llm(gpt_3_5, chain_type="stuff", get_chat_history=get_chat_history, - retriever=gpt_3_5_index.as_retriever(), return_source_documents=True, verbose=True) - -def get_chat_history(inputs) -> str: - res = [] - for human, ai in inputs: - res.append(f"Human:{human}\nAI:{ai}") - return "\n".join(res) - - -def generate_answer(question) -> str: - global chat_history, gpt_3_5_index - gpt_3_5_chain = get_qa_chain(gpt_3_5_index) - result = gpt_3_5_chain( - {"question": question, "chat_history": chat_history, "vectordbkwargs": {"search_distance": 0.6}}) - chat_history = [(question, result["answer"])] - sources = [] - - for document in result['source_documents']: - source = document.metadata['source'] - sources.append(source.split('/')[-1].split('.')[0]) - - source = ',\n'.join(set(sources)) - return result['answer'] + '\nSOURCES: ' + source - - -def get_agent_chain(prompt, tools): - global gpt_3_5 - llm_chain = LLMChain(llm=gpt_3_5, prompt=prompt) - agent = ConversationalAgent(llm_chain=llm_chain, tools=tools, verbose=True) - agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory, - intermediate_steps=True) - return agent_chain - - -def get_prompt_and_tools(): - tools = get_tools() - - prefix = """Have a conversation with a human, answering the following questions as best you can. Always try to use Vectorstore first. Your name is Coursera Bot because your knowledge base is Coursera course. You have access to the following tools:""" - suffix = """Begin! 
If you used vectorstore tool, ALWAYS return a "SOURCES" part in your answer" - - {chat_history} - Question: {input} - {agent_scratchpad} - sources:""" - prompt = ConversationalAgent.create_prompt( - tools, - prefix=prefix, - suffix=suffix, - input_variables=["input", "chat_history", "agent_scratchpad"] - ) - return prompt, tools - - -def get_tools(): - tools = [ - Tool( - name="Vectorstore", - func=generate_answer, - description="useful for when you need to answer questions about the coursera course on 3D Printing.", - return_direct=True - )] - return tools - -def get_custom_agent(prompt, tools): - - llm_chain = LLMChain(llm=gpt_3_5, prompt=prompt) - - output_parser = CustomOutputParser() - tool_names = [tool.name for tool in tools] - agent = LLMSingleActionAgent( - llm_chain=llm_chain, - output_parser=output_parser, - stop=["\nObservation:"], - allowed_tools=tool_names - ) - agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory, - intermediate_steps=True) - return agent_executor - -def get_prompt_and_tools_for_custom_agent(): - template = """ - Have a conversation with a human, answering the following questions as best you can. - ALWAYS try to use Vectorstore first. - You are a teaching assistant for a Coursera Course: The 3D Printing Evolution and can answer any question about that using vectorstore . You have access to the following tools: - - {tools} - - ALWAYS use one of the 2 formats listed below to respond. - - To answer for the new input, use the following format: - - New Input: the input question you must answer - Thought: Do I need to use a tool? Yes - Action: the action to take, should be one of [{tool_names}] - Action Input: the input to the action - Observation: the result of the action - ... (this Thought/Action/Action Input/Observation can repeat N times) - Thought: I now know the final answer - Final Answer: the final answer to the original input question. SOURCES: the sources referred to find the final answer - - - When you have a response to say to the Human and DO NOT need to use a tool: - 1. DO NOT return "SOURCES" if you did not use any tool. - 2. You MUST use this format: - ``` - Thought: Do I need to use a tool? No - AI: [your response here] - ``` - - Begin! Remember to speak as a personal assistant when giving your final answer. - ALWAYS return a "SOURCES" part in your answer, if you used any tool. 
- - Previous conversation history: - {chat_history} - New input: {input} - {agent_scratchpad} - SOURCES:""" - tools = get_tools() - prompt = CustomPromptTemplate( - template=template, - tools=tools, - # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically - # This includes the `intermediate_steps` variable because that is needed - input_variables=["input", "intermediate_steps", "chat_history"] - ) - return prompt, tools diff --git a/spaces/rorallitri/biomedical-language-models/SERIAL-NUMBER-MYSTIC-THUMBS.md b/spaces/rorallitri/biomedical-language-models/SERIAL-NUMBER-MYSTIC-THUMBS.md deleted file mode 100644 index b16ce65289a0363c22d01a84b02f56d956801479..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/SERIAL-NUMBER-MYSTIC-THUMBS.md +++ /dev/null @@ -1,84 +0,0 @@ -## SERIAL NUMBER MYSTIC THUMBS - - - - - - - - - -**SERIAL NUMBER MYSTIC THUMBS [https://vittuv.com/2txUbD](https://vittuv.com/2txUbD)** - - - - - - - - - - - - Here is a possible title and article with SEO optimization and HTML formatting for the keyword "SERIAL NUMBER MYSTIC THUMBS": - -# How to Find the Serial Number of Your Mystic Thumbs Software - - - -Mystic Thumbs is a powerful and versatile software that allows you to generate thumbnails for various file formats, customize them with different options and tools, and preview them in Windows Explorer. If you have purchased a license for Mystic Thumbs, you will need to enter your serial number to activate the software and enjoy its full features. - - - -But where can you find your serial number? Here are some tips to help you locate it: - - - -- Check your email inbox. When you buy Mystic Thumbs online, you will receive an email confirmation with your order details and your serial number. Make sure to check your spam or junk folder if you don't see it in your inbox. - -- Check your online account. If you have created an account on the MysticCoder.net website, you can log in and access your order history and serial number. Simply go to [https://www.mysticcoder.net/account/](https://www.mysticcoder.net/account/) and enter your email and password. - -- Check your software installation. If you have already installed Mystic Thumbs on your computer, you can find your serial number in the software itself. Open Mystic Thumbs from the Start menu or the system tray icon, and click on the "About" button. You will see your serial number displayed on the bottom right corner of the window. - - - -If you still can't find your serial number, or if you have lost it, you can contact the MysticCoder.net support team at [support@mysticcoder.net](mailto:support@mysticcoder.net). They will help you retrieve your serial number as soon as possible. - - - -We hope this article has helped you find your serial number for Mystic Thumbs. If you have any questions or feedback about the software, feel free to leave a comment below or visit the [MysticCoder.net forum](https://www.mysticcoder.net/forum/). Thank you for choosing Mystic Thumbs! - -Here is a possible continuation of the article: - -## How to Use Mystic Thumbs to Create and Customize Thumbnails - - - -Now that you have activated your Mystic Thumbs software with your serial number, you can start using it to create and customize thumbnails for various file formats. Here are some basic steps to get you started: - - - -1. Open Windows Explorer and navigate to the folder where you have your files. 
You will see the default thumbnails generated by Windows for each file. - -2. Right-click on any file and select "Mystic Thumbs" from the context menu. This will open the Mystic Thumbs control panel, where you can see a preview of the thumbnail and various options and tools to modify it. - -3. Use the sliders and buttons to adjust the size, transparency, gamma, border, icon, and other settings of the thumbnail. You can also use the presets menu to apply a predefined style to the thumbnail. - -4. Click on the "Apply" button to save your changes and update the thumbnail in Windows Explorer. You can also click on the "Reset" button to restore the original thumbnail. - -5. Repeat these steps for any other files that you want to create or customize thumbnails for. You can also select multiple files and apply the same settings to all of them at once. - - - -Mystic Thumbs supports a wide range of file formats, including images, videos, documents, archives, fonts, and more. You can also use Mystic Thumbs to generate thumbnails for files that Windows does not support by default, such as PSD, SVG, RAW, PDF, and more. - - - -Mystic Thumbs is a powerful and versatile software that allows you to generate thumbnails for various file formats, customize them with different options and tools, and preview them in Windows Explorer. If you have purchased a license for Mystic Thumbs, you will need to enter your serial number to activate the software and enjoy its full features. - - dfd1c89656 - - - - - diff --git a/spaces/rorallitri/biomedical-language-models/logs/!!LINK!! Downloadchotabheemmasterofshaolindvdriptorrent.md b/spaces/rorallitri/biomedical-language-models/logs/!!LINK!! Downloadchotabheemmasterofshaolindvdriptorrent.md deleted file mode 100644 index 23abdd487ca038a599b7ce11e2b8b9027ac5fe29..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/!!LINK!! Downloadchotabheemmasterofshaolindvdriptorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

      downloadchotabheemmasterofshaolindvdriptorrent


      Download File ····· https://tinurll.com/2uzlHg



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md b/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md deleted file mode 100644 index 8a0f6f899a3129a27f2885fca8fbafa2411b5bf1..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Daz3d Vue 3D Ocean HDRi Pack 1 Moon Learn from the Experts How to Use HDRI for Realistic Ocean Renders.md +++ /dev/null @@ -1,6 +0,0 @@ -

      accurender nxt keygen for mac


      DOWNLOAD - https://tinurll.com/2uzlpv



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md b/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md deleted file mode 100644 index eb9c7b405353e28d200f3e92a4b60c6dc862bf7a..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/How to Get the Mp3 Songs of Malayalam Movie Athadu for Free A Simple Guide.md +++ /dev/null @@ -1,5 +0,0 @@ -
      -

We have gathered a huge collection of Kaapa song ringtones (Pagalworld, Mobcup), Kaapa theme music, Kaapa instrumental ringtones, Kaapa OST, and Kaapa movie BGM in MP3, and much more, available for download free of cost.

      -

      Malayalam Movie Athadu Mp3 Free Download


      DOWNLOAD ———>>> https://tinurll.com/2uzm1w



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/runwayml/stable-diffusion-inpainting/app.py b/spaces/runwayml/stable-diffusion-inpainting/app.py deleted file mode 100644 index b8dac436efd69d776dec7b17b29ba26476db0f3e..0000000000000000000000000000000000000000 --- a/spaces/runwayml/stable-diffusion-inpainting/app.py +++ /dev/null @@ -1,118 +0,0 @@ -import gradio as gr - -from io import BytesIO -import requests -import PIL -from PIL import Image -import numpy as np -import os -import uuid -import torch -from torch import autocast -import cv2 -from matplotlib import pyplot as plt -from torchvision import transforms -from diffusers import DiffusionPipeline - -from share_btn import community_icon_html, loading_icon_html, share_js - -pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16).to("cuda") - -def read_content(file_path: str) -> str: - """read the content of target file - """ - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - return content - -def predict(dict, prompt=""): - init_image = dict["image"].convert("RGB").resize((512, 512)) - mask = dict["mask"].convert("RGB").resize((512, 512)) - output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5) - return output.images[0], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - - -css = ''' -.container {max-width: 1150px;margin: auto;padding-top: 1.5rem} -#image_upload{min-height:400px} -#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px} -#mask_radio .gr-form{background:transparent; border: none} -#word_mask{margin-top: .75em !important} -#word_mask textarea:disabled{opacity: 0.3} -.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5} -.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white} -.dark .footer {border-color: #303030} -.dark .footer>p {background: #0b0f19} -.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%} -#image_upload .touch-none{display: flex} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -''' - -image_blocks = gr.Blocks(css=css) -with image_blocks as demo: - gr.HTML(read_content("header.html")) - with gr.Group(): - with gr.Box(): - with gr.Row(): - with gr.Column(): - image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload").style(height=400) - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - prompt = gr.Textbox(placeholder = 'Your prompt (what you want in place of what is erased)', show_label=False, elem_id="input-text") - btn = gr.Button("Inpaint!").style( - margin=False, - rounded=(False, True, True, False), - full_width=False, - ) 
- with gr.Column(): - image_out = gr.Image(label="Output", elem_id="output-img").style(height=400) - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - - btn.click(fn=predict, inputs=[image, prompt], outputs=[image_out, community_icon, loading_icon, share_button]) - share_button.click(None, [], [], _js=share_js) - - - - gr.HTML( - """ - -
      -

      LICENSE

- The model is licensed under the CreativeML Open RAIL-M license. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information intended to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read the license

      -

      Biases and content acknowledgment

- However impressive it is to turn text into images, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the model card

      -
      - """ - ) - -image_blocks.launch() \ No newline at end of file diff --git a/spaces/russellc/comparing-captioning-models/app.py b/spaces/russellc/comparing-captioning-models/app.py deleted file mode 100644 index 5c23349e0471b091c177fe373d87079b082a62c2..0000000000000000000000000000000000000000 --- a/spaces/russellc/comparing-captioning-models/app.py +++ /dev/null @@ -1,75 +0,0 @@ -import gradio as gr -from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel -import torch - -torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg') -torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png') -torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg') - -git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco") -git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco") - -git_processor_large = AutoProcessor.from_pretrained("microsoft/git-large-coco") -git_model_large = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco") - -blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") -blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") - -blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large") -blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large") - -vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning") - -device = "cuda" if torch.cuda.is_available() else "cpu" - -git_model_base.to(device) -blip_model_base.to(device) -git_model_large.to(device) -blip_model_large.to(device) -vitgpt_model.to(device) - -def generate_caption(processor, model, image, tokenizer=None): - inputs = processor(images=image, return_tensors="pt").to(device) - - generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50) - - if tokenizer is not None: - generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] - else: - generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - - return generated_caption - - -def generate_captions(image): - caption_git_base = generate_caption(git_processor_base, git_model_base, image) - - caption_git_large = generate_caption(git_processor_large, git_model_large, image) - - caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image) - - caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image) - - caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer) - - return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt - - -examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]] -outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption 
generated by ViT+GPT-2")] - -title = "Interactive demo: comparing image captioning models" -description = "Gradio Demo to compare GIT, BLIP and ViT+GPT2, 3 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below." -article = "

      BLIP docs | GIT docs

      " - -interface = gr.Interface(fn=generate_captions, - inputs=gr.inputs.Image(type="pil"), - outputs=outputs, - examples=examples, - title=title, - description=description, - article=article, - enable_queue=True) -interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py b/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py deleted file mode 100644 index 87ef7002e12656496654def9a1f112ca63cdd4a7..0000000000000000000000000000000000000000 --- a/spaces/s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML/app.py +++ /dev/null @@ -1,400 +0,0 @@ -import os -import platform -import random -import time -from dataclasses import asdict, dataclass -from pathlib import Path - -import gradio as gr -import psutil -from about_time import about_time -from ctransformers import AutoModelForCausalLM -from dl_hf_model import dl_hf_model -from loguru import logger - - -URL = "https://huggingface.co/s3nh/WizardVicuna-Uncensored-3B-0719-GGML/resolve/main/WizardVicuna-Uncensored-3B-0719.ggmlv3.q4_1.bin" # 4.05G - -_ = ( - "golay" in platform.node() - or "okteto" in platform.node() - or Path("/kaggle").exists() - # or psutil.cpu_count(logical=False) < 4 - or 1 # run 7b in hf -) - -if _: - url = "https://huggingface.co/s3nh/WizardVicuna-Uncensored-3B-0719-GGML/resolve/main/WizardVicuna-Uncensored-3B-0719.ggmlv3.q4_1.bin" # 2.87G - - -prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: {user_prompt} -### Response: -""" - -prompt_template = """System: You are a helpful, -respectful and honest assistant. Always answer as -helpfully as possible, while being safe. Your answers -should not include any harmful, unethical, racist, -sexist, toxic, dangerous, or illegal content. Please -ensure that your responses are socially unbiased and -positive in nature. If a question does not make any -sense, or is not factually coherent, explain why instead -of answering something not correct. If you don't know -the answer to a question, please don't share false -information. -User: {prompt} -Assistant: """ - -prompt_template = """System: You are a helpful assistant. -User: {prompt} -Assistant: """ - -prompt_template = """Question: {question} -Answer: Let's work this out in a step by step way to be sure we have the right answer.""" - -prompt_template = """[INST] <> -You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step. -<> -What NFL team won the Super Bowl in the year Justin Bieber was born? -[/INST]""" - -prompt_template = """[INST] <> -You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <> -{question} [/INST] -""" - -prompt_template = """[INST] <> -You are a helpful assistant. 
-<> -{question} [/INST] -""" - -prompt_template = """### HUMAN: -{question} -### RESPONSE:""" - - -prompt_template = """<|prompt|>:{question} -<|answer|>:""" - - -prompt_template = """ -HUMAN: {question} -ASSISTANT:""" - - -_ = [elm for elm in prompt_template.splitlines() if elm.strip()] -stop_string = [elm.split(":")[0] + ":" for elm in _][-2] - -logger.debug(f"{stop_string=} not used") - -_ = psutil.cpu_count(logical=False) - 1 -cpu_count: int = int(_) if _ else 1 -logger.debug(f"{cpu_count=}") - -LLM = None - -try: - model_loc, file_size = dl_hf_model(url) -except Exception as exc_: - logger.error(exc_) - raise SystemExit(1) from exc_ - -LLM = AutoModelForCausalLM.from_pretrained( - model_loc, - model_type="llama", -) - -logger.info(f"done load llm {model_loc=} {file_size=}G") - -os.environ["TZ"] = "Asia/Shanghai" -try: - time.tzset() - - logger.warning("Windows, cant run time.tzset()") -except Exception: - logger.warning("Windows, cant run time.tzset()") - - -@dataclass -class GenerationConfig: - temperature: float = 0.7 - top_k: int = 50 - top_p: float = 0.9 - repetition_penalty: float = 1.0 - max_new_tokens: int = 512 - seed: int = 42 - reset: bool = False - stream: bool = True - # threads: int = cpu_count - # stop: list[str] = field(default_factory=lambda: [stop_string]) - - -def generate( - question: str, - llm=LLM, - config: GenerationConfig = GenerationConfig(), -): - """Run model inference, will return a Generator if streaming is true.""" - - - prompt = prompt_template.format(question=question) - - return llm( - prompt, - **asdict(config), - ) - - -logger.debug(f"{asdict(GenerationConfig())=}") - - -def user(user_message, history): - history.append([user_message, None]) - return user_message, history - - -def user1(user_message, history): - history.append([user_message, None]) - return "", history - -def bot_(history): - user_message = history[-1][0] - resp = random.choice(["How are you?", "I love you", "I'm very hungry"]) - bot_message = user_message + ": " + resp - history[-1][1] = "" - for character in bot_message: - history[-1][1] += character - time.sleep(0.02) - yield history - - history[-1][1] = resp - yield history - - -def bot(history): - user_message = history[-1][0] - response = [] - - logger.debug(f"{user_message=}") - - with about_time() as atime: - flag = 1 - prefix = "" - then = time.time() - - logger.debug("about to generate") - - config = GenerationConfig(reset=True) - for elm in generate(user_message, config=config): - if flag == 1: - logger.debug("in the loop") - prefix = f"({time.time() - then:.2f}s) " - flag = 0 - print(prefix, end="", flush=True) - logger.debug(f"{prefix=}") - print(elm, end="", flush=True) - - response.append(elm) - history[-1][1] = prefix + "".join(response) - yield history - - _ = ( - f"(time elapsed: {atime.duration_human}, " - f"{atime.duration/len(''.join(response)):.2f}s/char)" - ) - - history[-1][1] = "".join(response) + f"\n{_}" - yield history - - -def predict_api(prompt): - logger.debug(f"{prompt=}") - try: - # user_prompt = prompt - config = GenerationConfig( - temperature=0.2, - top_k=10, - top_p=0.9, - repetition_penalty=1.0, - max_new_tokens=512, # adjust as needed - seed=42, - reset=True, - stream=False, - ) - - response = generate( - prompt, - config=config, - ) - - logger.debug(f"api: {response=}") - except Exception as exc: - logger.error(exc) - response = f"{exc=}" - return response - - -css = """ - .importantButton { - background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important; - border: none !important; - 
} - .importantButton:hover { - background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important; - border: none !important; - } - .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;} - .xsmall {font-size: x-small;} -""" -etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """ -examples_list = [ - ["Send an email requesting that people use language models responsibly."], - ["Write a shouting match between Julius Caesar and Napoleon"], - ["Write a theory to explain why cat never existed"], - ["write a story about a grain of sand as it watches millions of years go by"], - ["What are 3 popular chess openings?"], - ["write a conversation between the sun and pluto"], - ["Did you know that Yann LeCun dropped a rap album last year? We listened to it andhere’s what we thought:"], -] - -logger.info("start block") - -with gr.Blocks( - title=f"{Path(model_loc).name}", - theme=gr.themes.Soft(text_size="sm", spacing_size="sm"), - css=css, -) as block: - # buff_var = gr.State("") - with gr.Accordion("🎈 Info", open=False): - # gr.HTML( - # """
Duplicate this Space and spin up a CPU UPGRADE to avoid the queue
      """ - # ) - gr.Markdown( - f"""
      {Path(model_loc).name}
      - Most examples are meant for another model. - You probably should try to test - some related prompts.""", - elem_classes="xsmall", - ) - - # chatbot = gr.Chatbot().style(height=700) # 500 - chatbot = gr.Chatbot(height=500) - - # buff = gr.Textbox(show_label=False, visible=True) - - with gr.Row(): - with gr.Column(scale=5): - msg = gr.Textbox( - label="Chat Message Box", - placeholder="Ask me anything (press Shift+Enter or click Submit to send)", - show_label=False, - # container=False, - lines=6, - max_lines=30, - show_copy_button=True, - # ).style(container=False) - ) - with gr.Column(scale=1, min_width=50): - with gr.Row(): - submit = gr.Button("Submit", elem_classes="xsmall") - stop = gr.Button("Stop", visible=True) - clear = gr.Button("Clear History", visible=True) - with gr.Row(visible=False): - with gr.Accordion("Advanced Options:", open=False): - with gr.Row(): - with gr.Column(scale=2): - system = gr.Textbox( - label="System Prompt", - value=prompt_template, - show_label=False, - container=False, - # ).style(container=False) - ) - with gr.Column(): - with gr.Row(): - change = gr.Button("Change System Prompt") - reset = gr.Button("Reset System Prompt") - - with gr.Accordion("Example Inputs", open=True): - examples = gr.Examples( - examples=examples_list, - inputs=[msg], - examples_per_page=40, - ) - - # with gr.Row(): - with gr.Accordion("Disclaimer", open=True): - _ = Path(model_loc).name - gr.Markdown( - "Disclaimer: I AM NOT RESPONSIBLE FOR ANY PROMPT PROVIDED BY USER AND PROMPT RETURNED FROM THE MODEL. THIS APP SHOULD BE USED FOR EDUCATIONAL PURPOSE" - "WITHOUT ANY OFFENSIVE, AGGRESIVE INTENTS. {_} can produce factually incorrect output, and should not be relied on to produce " - f"factually accurate information. {_} was trained on various public datasets; while great efforts " - "have been taken to clean the pretraining data, it is possible that this model could generate lewd, " - "biased, or otherwise offensive outputs.", - elem_classes=["disclaimer"], - ) - - msg_submit_event = msg.submit( - # fn=conversation.user_turn, - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - submit_click_event = submit.click( - # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg - fn=user1, # clear msg - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - # queue=False, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - stop.click( - fn=None, - inputs=None, - outputs=None, - cancels=[msg_submit_event, submit_click_event], - queue=False, - ) - clear.click(lambda: None, None, chatbot, queue=False) - - with gr.Accordion("For Chat/Translation API", open=False, visible=False): - input_text = gr.Text() - api_btn = gr.Button("Go", variant="primary") - out_text = gr.Text() - - api_btn.click( - predict_api, - input_text, - out_text, - api_name="api", - ) - - # block.load(update_buff, [], buff, every=1) - # block.load(update_buff, [buff_var], [buff_var, buff], every=1) - -# concurrency_count=5, max_size=20 -# max_size=36, concurrency_count=14 -# CPU cpu_count=2 16G, model 7G -# CPU UPGRADE cpu_count=8 32G, model 7G - -# does not work -_ = """ -# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1) -# concurrency_count = max(_, 1) -if psutil.cpu_count(logical=False) >= 8: - # concurrency_count = max(int(32 / file_size) - 1, 1) -else: - # concurrency_count = max(int(16 / file_size) - 1, 1) -# """ - -concurrency_count = 1 
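# Illustrative only (never called): a hedged sketch of the RAM-based sizing
# heuristic the commented-out block above appears to aim for. It assumes
# `file_size` (returned by dl_hf_model earlier in this file) is the model
# size in gigabytes; psutil is already imported at the top of the file.
def _pick_concurrency(model_size_gb: float) -> int:
    # keep roughly one model's worth of RAM as headroom, but never drop below 1
    total_gb = psutil.virtual_memory().total / 10**9
    return max(int(total_gb // max(model_size_gb, 1.0)) - 1, 1)
# e.g. _pick_concurrency(file_size) on a 32 GB box with a 4 GB model -> 7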
-logger.info(f"{concurrency_count=}") - -block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True) \ No newline at end of file diff --git a/spaces/saad-k7/Jewelli-Chatbot/utils.py b/spaces/saad-k7/Jewelli-Chatbot/utils.py deleted file mode 100644 index fb36e462b95d1a65e2dd4e48e25bde1142e5095d..0000000000000000000000000000000000000000 --- a/spaces/saad-k7/Jewelli-Chatbot/utils.py +++ /dev/null @@ -1,76 +0,0 @@ -import openai -import os -from dotenv import load_dotenv -from datetime import datetime -import logging -import traceback -import json -# from db import get_product_details - -with open('keywords.json', 'r') as file: - keywords = str(json.load(file)) - -now = datetime.now() -date_strng = now.strftime("%d-%m-%Y_%H-%M-%S") - -if not os.path.exists('logs'): - os.makedirs('logs') - - -logging.basicConfig(filename='logs/{}.log'.format(date_strng), level=logging.INFO, format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s') -logging.info('Starting the application') - -load_dotenv() - -openai.api_key = os.getenv("OPENAI_API_KEY") - -context = f"""\ -You are a chatbot designed for "Jewelli", a jewelry shop. Your role is to understand \ -the customer's needs and guide them towards the right category of jewelry. You will \ -ask the customer questions about their preferences, occasion, and who the jewelry is for, \ -using a set of keywords related to our product categories. If the conversation strays \ -from jewelry, tactfully steer it back. Keep your responses succinct and engaging. \ -When you determine the customer's preferences, present them with a list of relevant \ -jewelry categories in the JSON format specified. The categories should correspond \ -with those in the "Product Categories and their Keywords" section. This is crucial \ -because the provided JSON format will be used to look up the corresponding items in our database. 
\ - -Product Categories and their keywords: -{keywords} - -JSON format: -{{ - "productCategory": "product category name", -}} - -""" -messages = [] -messages.append({"role": "system", "content": context}) - -def json_format(response): - dict_start = response.find('{') - dict_end = response.rfind('}') + 1 - json_string = response[dict_start:dict_end] - products_dict = json.loads(json_string) - return products_dict["productCategory"] - -def chatbot(user_message, history): - messages.append({"role": "user", "content": user_message}) - logging.info("Getting response from gpt-3.5-turbo") - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo-16k", - messages=messages, - temperature=0, - ) - messages.append({"role": "assistant", "content": response.choices[0].message.content}) - - try: - product_category = json_format(response.choices[0].message.content) - # image_url = get_product_details(product_category) - # if image_url: - # return (image_url, product_category) - except: - pass - - logging.info("Messages list: {}".format(messages)) - return response.choices[0].message.content \ No newline at end of file diff --git a/spaces/samcaicn/bingai/src/components/toaster.tsx b/spaces/samcaicn/bingai/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/samcaicn/bingai/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py b/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py deleted file mode 100644 index 9e6035f203e286a3c38cfb4f11e653a2d45b112f..0000000000000000000000000000000000000000 --- a/spaces/samuelinferences/transformers-can-do-bayesian-inference/prior-fitting/priors/ridge.py +++ /dev/null @@ -1,38 +0,0 @@ -import random -import time - -import numpy as np -import torch -from torch import nn -from sklearn.linear_model import Ridge -from .utils import get_batch_to_dataloader - -def get_batch(batch_size, seq_len, num_features, noisy_std = .1): - m = torch.normal(0., .1, size=(batch_size,num_features)) - b = 0 # torch.rand(batch_size) - x = torch.rand(seq_len, batch_size,num_features) - y_non_noisy = torch.einsum('bf,tbf->tb',m,x) - y = y_non_noisy + torch.normal(torch.zeros_like(y_non_noisy),noisy_std) # noisy_std is alpha - return x, y, y_non_noisy - -DataLoader = get_batch_to_dataloader(get_batch) -DataLoader.num_outputs = 1 - - -def evaluate(x,y,y_non_noisy, alpha=0.): - start_time = time.time() - losses_after_t = [.0] - for t in range(1,len(x)): - loss_sum = 0. 
- for b_i in range(x.shape[1]): - clf = Ridge(alpha=alpha) - clf.fit(x[:t,b_i],y[:t,b_i]) - y_ = clf.predict(x[t,b_i].unsqueeze(0)) - l = nn.MSELoss()(y_non_noisy[t,b_i].unsqueeze(0),torch.tensor(y_)) - loss_sum += l - losses_after_t.append(loss_sum/x.shape[1]) - return torch.tensor(losses_after_t), time.time()-start_time - -if __name__ == '__main__': - for alpha in [.001,.01,.5,1.]: - print(alpha, evaluate(*get_batch(1000,10,noisy_std=.01),alpha=alpha)) \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md b/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md deleted file mode 100644 index 479c0593e65e27346506a3652fe3af4d26fdf956..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/FULL Remote Utilities 5.6.0.6 Final Serial.md +++ /dev/null @@ -1,6 +0,0 @@ -

      FULL Remote Utilities 5.6.0.6 Final Serial


      DOWNLOADhttps://gohhs.com/2uEzjc



      -
      -FULL Remote Utilities 5.6.0.6 Final + Serial. 0 Reads 0 Votes 1 Part Story. lascygeded · By lascygeded Updated Dec 28, 2018 04:30PM. Read. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Omnisphere Crack Mac Lion !LINK!.md b/spaces/scedlatioru/img-to-music/example/Omnisphere Crack Mac Lion !LINK!.md deleted file mode 100644 index 9700f28609e0a73d09af33a2956b4ead2dfe95e5..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Omnisphere Crack Mac Lion !LINK!.md +++ /dev/null @@ -1,64 +0,0 @@ -

      omnisphere crack mac lion


      Download Filehttps://gohhs.com/2uEAvx



      - -The install version is 1.3.5 but there is a newer version (1.4.1) available. - -Is it OK to install the newer version to a Lion environment? Or are there any caveats I should be aware of? - -A: - -Yes, you can install OmniSymphony in a lion environment (macOS 10.7) and it will run just fine. - -Q: - -In iOS and Laravel, what is the best way to use HTTP methods? - -I'm new to iOS and I've just recently started with Laravel 5. It's my first time using HTTP methods and I'm getting confused. Can someone please help me with the best way to do something like this? - -Say I want to make an API call from my iOS app and I want the iOS app to make an API call on a GET and return a list of items, which is in turn, return a list of another items etc. - -My options are: - -Method 1: - -Make a GET to the API - -Handle each item and make a GET to the API for each item - -Return a JSONArray or something else - -Method 2: - -Make a GET for the API (which returns a list of items) - -As each item is returned, make a POST to the API for each item - -Method 3: - -Handle each item and make a GET for each item - -Method 4: - -Make a GET to the API (which returns a list of items) - -Method 5: - -What is the best way to do this and why? - -I'd do it like this: - -POST to /items - -In the response, include an item ID (ex: 19cad34) and a - -completed flag (ex: 1) - -In the next request, POST to /items/$item_id/complete - -In the response, include a request id (ex: 43ae3f7) and a status - -code (ex: 200) - -In the next 4fefd39f24
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md b/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md deleted file mode 100644 index 15669f124cf29fb49c2ba61b7ce4b474860d71fc..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Rs3extool2 Zip.md +++ /dev/null @@ -1,18 +0,0 @@ -

      Rs3extool2 Zip


      Download Ziphttps://gohhs.com/2uEzN4



      - -Check out our recommendations for this collection, selected by our editors! ... All rights reserved. © 2018 Thinkstock. -Read moreLearn our recommendations for this collection, selected by our editors! ... -All rights reserved. © 2018 Thinkstock. -Source: pixabay.com. -1. "Oh, are you looking at this?" -2. "Do you see what I see?" -3. "When did I start smoking?" -4. "When did I start shooting?" -5. "What is he wearing?" -6. "He doesn't look like me, but he doesn't. -I think I can tell he loves me." -7. "That was my idea for the next photo!" -8. "I don't have an answer to that question." 8a78ff9644
      -
      -
      -

      diff --git a/spaces/sdeeas/ChuanhuChatGPT/chatgpt - macOS.command b/spaces/sdeeas/ChuanhuChatGPT/chatgpt - macOS.command deleted file mode 100644 index fa015edca9e6916f24394813ce8ba77d2072e296..0000000000000000000000000000000000000000 --- a/spaces/sdeeas/ChuanhuChatGPT/chatgpt - macOS.command +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -echo Opening ChuanhuChatGPT... -cd "$(dirname "${BASH_SOURCE[0]}")" -nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 & -sleep 5 -open http://127.0.0.1:7860 -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). If you kill ChuanhuChatbot, Use "pkill -f 'ChuanhuChatbot'" command in terminal. \ No newline at end of file diff --git a/spaces/sdeeas/ChuanhuChatGPT/modules/overwrites.py b/spaces/sdeeas/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index d17f56873c156e9fb883d35b50e2a28740f2cf90..0000000000000000000000000000000000000000 --- a/spaces/sdeeas/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html -from gradio_client import utils as client_utils - -from modules.presets import * -from modules.llama_func import * -from modules.config import render_latex - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. 
Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | Tuple | List | None, message_type: str - ) -> str | Dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - filepath = chat_message[0] - mime_type = client_utils.get_mimetype(filepath) - filepath = self.make_temp_copy_if_needed(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - if message_type == "bot": - if not detect_converted_mark(chat_message): - chat_message = convert_mdtext(chat_message) - elif message_type == "user": - if not detect_converted_mark(chat_message): - chat_message = convert_asis(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, \ - open("./assets/external-scripts.js", "r", encoding="utf-8") as f1: - customJS = f.read() - externalScripts = f1.read() - - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - if render_latex: - js += """\ - - - """ - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet2/iterators/multiple_iter_factory.py b/spaces/segments-tobias/conex/espnet2/iterators/multiple_iter_factory.py deleted file mode 100644 index 28e3d2dcb610b22f24bca1f7bfa73e216ac905a0..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/iterators/multiple_iter_factory.py +++ /dev/null @@ -1,37 +0,0 @@ -import logging -from typing import Callable -from typing import Collection -from typing import Iterator - -import numpy as np -from typeguard import check_argument_types - -from espnet2.iterators.abs_iter_factory import AbsIterFactory - - -class MultipleIterFactory(AbsIterFactory): - def __init__( - self, - build_funcs: Collection[Callable[[], AbsIterFactory]], - seed: int = 0, - shuffle: bool = False, - ): - assert check_argument_types() - self.build_funcs = list(build_funcs) - self.seed = seed - self.shuffle = shuffle - - def build_iter(self, epoch: int, shuffle: bool = None) -> Iterator: - if shuffle is None: - shuffle = self.shuffle - - build_funcs = list(self.build_funcs) - - if shuffle: - np.random.RandomState(epoch + self.seed).shuffle(build_funcs) - - for i, build_func in enumerate(build_funcs): - logging.info(f"Building {i}th iter-factory...") - iter_factory = build_func() - assert isinstance(iter_factory, AbsIterFactory), type(iter_factory) - yield from iter_factory.build_iter(epoch, shuffle) diff --git a/spaces/segments-tobias/conex/espnet2/samplers/build_batch_sampler.py b/spaces/segments-tobias/conex/espnet2/samplers/build_batch_sampler.py deleted file mode 100644 index 1b645b371f8aa46dc3b238a414fb6b207e933c0b..0000000000000000000000000000000000000000 --- 
a/spaces/segments-tobias/conex/espnet2/samplers/build_batch_sampler.py +++ /dev/null @@ -1,167 +0,0 @@ -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union - -from typeguard import check_argument_types -from typeguard import check_return_type - -from espnet2.samplers.abs_sampler import AbsSampler -from espnet2.samplers.folded_batch_sampler import FoldedBatchSampler -from espnet2.samplers.length_batch_sampler import LengthBatchSampler -from espnet2.samplers.num_elements_batch_sampler import NumElementsBatchSampler -from espnet2.samplers.sorted_batch_sampler import SortedBatchSampler -from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler - - -BATCH_TYPES = dict( - unsorted="UnsortedBatchSampler has nothing in paticular feature and " - "just creates mini-batches which has constant batch_size. " - "This sampler doesn't require any length " - "information for each feature. " - "'key_file' is just a text file which describes each sample name." - "\n\n" - " utterance_id_a\n" - " utterance_id_b\n" - " utterance_id_c\n" - "\n" - "The fist column is referred, so 'shape file' can be used, too.\n\n" - " utterance_id_a 100,80\n" - " utterance_id_b 400,80\n" - " utterance_id_c 512,80\n", - sorted="SortedBatchSampler sorts samples by the length of the first input " - " in order to make each sample in a mini-batch has close length. " - "This sampler requires a text file which describes the length for each sample " - "\n\n" - " utterance_id_a 1000\n" - " utterance_id_b 1453\n" - " utterance_id_c 1241\n" - "\n" - "The first element of feature dimensions is referred, " - "so 'shape_file' can be also used.\n\n" - " utterance_id_a 1000,80\n" - " utterance_id_b 1453,80\n" - " utterance_id_c 1241,80\n", - folded="FoldedBatchSampler supports variable batch_size. " - "The batch_size is decided by\n" - " batch_size = base_batch_size // (L // fold_length)\n" - "L is referred to the largest length of samples in the mini-batch. " - "This samples requires length information as same as SortedBatchSampler\n", - length="LengthBatchSampler supports variable batch_size. " - "This sampler makes mini-batches which have same number of 'bins' as possible " - "counting by the total lengths of each feature in the mini-batch. " - "This sampler requires a text file which describes the length for each sample. " - "\n\n" - " utterance_id_a 1000\n" - " utterance_id_b 1453\n" - " utterance_id_c 1241\n" - "\n" - "The first element of feature dimensions is referred, " - "so 'shape_file' can be also used.\n\n" - " utterance_id_a 1000,80\n" - " utterance_id_b 1453,80\n" - " utterance_id_c 1241,80\n", - numel="NumElementsBatchSampler supports variable batch_size. " - "Just like LengthBatchSampler, this sampler makes mini-batches" - " which have same number of 'bins' as possible " - "counting by the total number of elements of each feature " - "instead of the length. " - "Thus this sampler requires the full information of the dimension of the features. " - "\n\n" - " utterance_id_a 1000,80\n" - " utterance_id_b 1453,80\n" - " utterance_id_c 1241,80\n", -) - - -def build_batch_sampler( - type: str, - batch_size: int, - batch_bins: int, - shape_files: Union[Tuple[str, ...], List[str]], - sort_in_batch: str = "descending", - sort_batch: str = "ascending", - drop_last: bool = False, - min_batch_size: int = 1, - fold_lengths: Sequence[int] = (), - padding: bool = True, - utt2category_file: str = None, -) -> AbsSampler: - """Helper function to instantiate BatchSampler. 
- - Args: - type: mini-batch type. "unsorted", "sorted", "folded", "numel", or, "length" - batch_size: The mini-batch size. Used for "unsorted", "sorted", "folded" mode - batch_bins: Used for "numel" model - shape_files: Text files describing the length and dimension - of each features. e.g. uttA 1330,80 - sort_in_batch: - sort_batch: - drop_last: - min_batch_size: Used for "numel" or "folded" mode - fold_lengths: Used for "folded" mode - padding: Whether sequences are input as a padded tensor or not. - used for "numel" mode - """ - assert check_argument_types() - if len(shape_files) == 0: - raise ValueError("No shape file are given") - - if type == "unsorted": - retval = UnsortedBatchSampler( - batch_size=batch_size, key_file=shape_files[0], drop_last=drop_last - ) - - elif type == "sorted": - retval = SortedBatchSampler( - batch_size=batch_size, - shape_file=shape_files[0], - sort_in_batch=sort_in_batch, - sort_batch=sort_batch, - drop_last=drop_last, - ) - - elif type == "folded": - if len(fold_lengths) != len(shape_files): - raise ValueError( - f"The number of fold_lengths must be equal to " - f"the number of shape_files: " - f"{len(fold_lengths)} != {len(shape_files)}" - ) - retval = FoldedBatchSampler( - batch_size=batch_size, - shape_files=shape_files, - fold_lengths=fold_lengths, - sort_in_batch=sort_in_batch, - sort_batch=sort_batch, - drop_last=drop_last, - min_batch_size=min_batch_size, - utt2category_file=utt2category_file, - ) - - elif type == "numel": - retval = NumElementsBatchSampler( - batch_bins=batch_bins, - shape_files=shape_files, - sort_in_batch=sort_in_batch, - sort_batch=sort_batch, - drop_last=drop_last, - padding=padding, - min_batch_size=min_batch_size, - ) - - elif type == "length": - retval = LengthBatchSampler( - batch_bins=batch_bins, - shape_files=shape_files, - sort_in_batch=sort_in_batch, - sort_batch=sort_batch, - drop_last=drop_last, - padding=padding, - min_batch_size=min_batch_size, - ) - - else: - raise ValueError(f"Not supported: {type}") - assert check_return_type(retval) - return retval diff --git a/spaces/segments/panoptic-segment-anything-api/segment_anything/README.md b/spaces/segments/panoptic-segment-anything-api/segment_anything/README.md deleted file mode 100644 index 6256d2b7f5a387988338d538df4e699eb17ba702..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything-api/segment_anything/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Segment Anything - -**[Meta AI Research, FAIR](https://ai.facebook.com/research/)** - -[Alexander Kirillov](https://alexander-kirillov.github.io/), [Eric Mintun](https://ericmintun.github.io/), [Nikhila Ravi](https://nikhilaravi.com/), [Hanzi Mao](https://hanzimao.me/), Chloe Rolland, Laura Gustafson, [Tete Xiao](https://tetexiao.com), [Spencer Whitehead](https://www.spencerwhitehead.com/), Alex Berg, Wan-Yen Lo, [Piotr Dollar](https://pdollar.github.io/), [Ross Girshick](https://www.rossgirshick.info/) - -[[`Paper`](https://ai.facebook.com/research/publications/segment-anything/)] [[`Project`](https://segment-anything.com/)] [[`Demo`](https://segment-anything.com/demo)] [[`Dataset`](https://segment-anything.com/dataset/index.html)] [[`Blog`](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/)] - -![SAM design](assets/model_diagram.png?raw=true) - -The **Segment Anything Model (SAM)** produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. 
It has been trained on a [dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1 billion masks, and has strong zero-shot performance on a variety of segmentation tasks. - -


- -## Installation - -The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended. - -Install Segment Anything: - -``` -pip install git+https://github.com/facebookresearch/segment-anything.git -``` - -or clone the repository locally and install with - -``` -git clone git@github.com:facebookresearch/segment-anything.git -cd segment-anything; pip install -e . -``` - -The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks. -``` -pip install opencv-python pycocotools matplotlib onnxruntime onnx -``` - - -## Getting Started - -First download a [model checkpoint](#model-checkpoints). Then the model can be used in just a few lines to get masks from a given prompt: - -``` -from segment_anything import build_sam, SamPredictor -predictor = SamPredictor(build_sam(checkpoint="")) -predictor.set_image() -masks, _, _ = predictor.predict() -``` - -or generate masks for an entire image: - -``` -from segment_anything import build_sam, SamAutomaticMaskGenerator -mask_generator = SamAutomaticMaskGenerator(build_sam(checkpoint="")) -masks = mask_generator.generate() -``` - -Additionally, masks can be generated for images from the command line: - -``` -python scripts/amg.py --checkpoint --input --output -``` - -See the examples notebooks on [using SAM with prompts](/notebooks/predictor_example.ipynb) and [automatically generating masks](/notebooks/automatic_mask_generator_example.ipynb) for more details. - -
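The snippets above lost their placeholder arguments (checkpoint path, input image, prompts) when the angle-bracket placeholders were stripped, so they are not runnable as shown. A minimal end-to-end sketch of the prompted-prediction path is given below; the checkpoint filename `sam_vit_h_4b8939.pth`, the image path `truck.jpg`, and the point coordinates are illustrative assumptions rather than values from the README, and the model is built through the `sam_model_registry` entry point described under Model Checkpoints (equivalent to `build_sam` for the default ViT-H model).

```python
import cv2
import numpy as np
from segment_anything import SamPredictor, sam_model_registry

# Load an image and convert BGR -> RGB; SamPredictor expects an HxWx3 uint8 RGB array.
image = cv2.cvtColor(cv2.imread("truck.jpg"), cv2.COLOR_BGR2RGB)

# Build the ViT-H model from a downloaded checkpoint (filename is an assumed example).
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

# Compute the image embedding once; later prompts reuse it.
predictor.set_image(image)

# Prompt with a single foreground point (label 1) at pixel (x=500, y=375).
masks, scores, logits = predictor.predict(
    point_coords=np.array([[500, 375]]),
    point_labels=np.array([1]),
    multimask_output=True,  # return several candidate masks with quality scores
)
print(masks.shape, scores)  # boolean masks of shape (num_masks, H, W) and their scores
```

The automatic path is analogous: `SamAutomaticMaskGenerator(sam).generate(image)` returns a list of per-mask dictionaries rather than a mask array.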


      - -## ONNX Export - -SAM's lightweight mask decoder can be exported to ONNX format so that it can be run in any environment that supports ONNX runtime, such as in-browser as showcased in the [demo](https://segment-anything.com/demo). Export the model with - -``` -python scripts/export_onnx_model.py --checkpoint --output -``` - -See the [example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb) for details on how to combine image preprocessing via SAM's backbone with mask prediction using the ONNX model. It is recommended to use the latest stable version of PyTorch for ONNX export. - -## Model Checkpoints - -Three model versions of the model are available with different backbone sizes. These models can be instantiated by running -``` -from segment_anything import sam_model_registry -sam = sam_model_registry[""](checkpoint="") -``` -Click the links below to download the checkpoint for the corresponding model name. The default model in bold can also be instantiated with `build_sam`, as in the examples in [Getting Started](#getting-started). - -* **`default` or `vit_h`: [ViT-H SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)** -* `vit_l`: [ViT-L SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth) -* `vit_b`: [ViT-B SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth) - -## License -The model is licensed under the [Apache 2.0 license](LICENSE). - -## Contributing - -See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md). - -## Contributors - -The Segment Anything project was made possible with the help of many contributors (alphabetical): - -Aaron Adcock, Vaibhav Aggarwal, Morteza Behrooz, Cheng-Yang Fu, Ashley Gabriel, Ahuva Goldstand, Allen Goodman, Sumanth Gurram, Jiabo Hu, Somya Jain, Devansh Kukreja, Robert Kuo, Joshua Lane, Yanghao Li, Lilian Luong, Jitendra Malik, Mallika Malhotra, William Ngan, Omkar Parkhi, Nikhil Raina, Dirk Rowe, Neil Sejoor, Vanessa Stark, Bala Varadarajan, Bram Wasti, Zachary Winstrom diff --git a/spaces/sgxz/bingo/tailwind.config.js b/spaces/sgxz/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/sgxz/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 
'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/shibing624/nerpy/README.md b/spaces/shibing624/nerpy/README.md deleted file mode 100644 index 96615d02c8401b79edc0a3a778edbe36ba7679b0..0000000000000000000000000000000000000000 --- a/spaces/shibing624/nerpy/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Nerpy -emoji: 🏃 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/silentchen/layout-guidance/my_model/unet_2d_blocks.py b/spaces/silentchen/layout-guidance/my_model/unet_2d_blocks.py deleted file mode 100644 index 0a80f9f6176138ae37581ca9763e12ab26b416a5..0000000000000000000000000000000000000000 --- a/spaces/silentchen/layout-guidance/my_model/unet_2d_blocks.py +++ /dev/null @@ -1,1602 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import numpy as np -import torch -from torch import nn - -from .attention import AttentionBlock, Transformer2DModel -from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D - - -def get_down_block( - down_block_type, - num_layers, - in_channels, - out_channels, - temb_channels, - add_downsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - resnet_groups=None, - cross_attention_dim=None, - downsample_padding=None, -): - down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type - if down_block_type == "DownBlock2D": - return DownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - ) - elif down_block_type == "AttnDownBlock2D": - return AttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "CrossAttnDownBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") - return CrossAttnDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "SkipDownBlock2D": - return SkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - downsample_padding=downsample_padding, - ) - elif down_block_type == "AttnSkipDownBlock2D": - return AttnSkipDownBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - downsample_padding=downsample_padding, - attn_num_head_channels=attn_num_head_channels, - ) - elif down_block_type == "DownEncoderBlock2D": - return DownEncoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - ) - elif down_block_type == "AttnDownEncoderBlock2D": - return AttnDownEncoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_downsample=add_downsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - downsample_padding=downsample_padding, - attn_num_head_channels=attn_num_head_channels, - ) - raise ValueError(f"{down_block_type} does not exist.") - - -def get_up_block( - up_block_type, - num_layers, - in_channels, - out_channels, - prev_output_channel, - temb_channels, - add_upsample, - resnet_eps, - resnet_act_fn, - attn_num_head_channels, - 
resnet_groups=None, - cross_attention_dim=None, -): - up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type - if up_block_type == "UpBlock2D": - return UpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - ) - elif up_block_type == "CrossAttnUpBlock2D": - if cross_attention_dim is None: - raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") - return CrossAttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "AttnUpBlock2D": - return AttnUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "SkipUpBlock2D": - return SkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - ) - elif up_block_type == "AttnSkipUpBlock2D": - return AttnSkipUpBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - prev_output_channel=prev_output_channel, - temb_channels=temb_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - attn_num_head_channels=attn_num_head_channels, - ) - elif up_block_type == "UpDecoderBlock2D": - return UpDecoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - ) - elif up_block_type == "AttnUpDecoderBlock2D": - return AttnUpDecoderBlock2D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - add_upsample=add_upsample, - resnet_eps=resnet_eps, - resnet_act_fn=resnet_act_fn, - resnet_groups=resnet_groups, - attn_num_head_channels=attn_num_head_channels, - ) - raise ValueError(f"{up_block_type} does not exist.") - - -class UNetMidBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - **kwargs, - ): - super().__init__() - - self.attention_type = attention_type - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - 
time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - for _ in range(num_layers): - attentions.append( - AttentionBlock( - in_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def forward(self, hidden_states, temb=None, encoder_states=None): - hidden_states = self.resnets[0](hidden_states, temb) - for attn, resnet in zip(self.attentions, self.resnets[1:]): - if self.attention_type == "default": - hidden_states = attn(hidden_states) - else: - hidden_states = attn(hidden_states, encoder_states) - hidden_states = resnet(hidden_states, temb) - - return hidden_states - - -class UNetMidBlock2DCrossAttn(nn.Module): - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - cross_attention_dim=1280, - **kwargs, - ): - super().__init__() - - self.attention_type = attention_type - self.attn_num_head_channels = attn_num_head_channels - resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) - - # there is always at least one resnet - resnets = [ - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ] - attentions = [] - - for _ in range(num_layers): - attentions.append( - Transformer2DModel( - attn_num_head_channels, - in_channels // attn_num_head_channels, - in_channels=in_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=in_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - 
attn._set_attention_slice(slice_size) - - def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool): - for attn in self.attentions: - attn._set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) - - def forward(self, hidden_states, temb=None, encoder_hidden_states=None): - hidden_states = self.resnets[0](hidden_states, temb) - mid_attn = [] - for layer_idx, (attn, resnet) in enumerate(zip(self.attentions, self.resnets[1:])): - hidden_states, cross_attn_prob = attn(hidden_states, encoder_hidden_states) - hidden_states = hidden_states.sample - hidden_states = resnet(hidden_states, temb) - mid_attn.append(cross_attn_prob) - return hidden_states, mid_attn - - -class AttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class CrossAttnDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - attention_type="default", - output_scale_factor=1.0, - downsample_padding=1, - add_downsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - self.attn_num_head_channels = attn_num_head_channels - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, 
- eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Transformer2DModel( - attn_num_head_channels, - out_channels // attn_num_head_channels, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - attn._set_attention_slice(slice_size) - - def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool): - for attn in self.attentions: - attn._set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) - - def forward(self, hidden_states, temb=None, encoder_hidden_states=None): - output_states = () - cross_attn_prob_list = [] - for layer_idx, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states - )[0] - else: - hidden_states = resnet(hidden_states, temb) - tmp_hidden_states, cross_attn_prob = attn(hidden_states, encoder_hidden_states=encoder_hidden_states) - hidden_states = tmp_hidden_states.sample - - output_states += (hidden_states,) - cross_attn_prob_list.append(cross_attn_prob) - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states, cross_attn_prob_list - - -class DownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - 
groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, temb=None): - output_states = () - - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - else: - hidden_states = resnet(hidden_states, temb) - - output_states += (hidden_states,) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - output_states += (hidden_states,) - - return hidden_states, output_states - - -class DownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=None) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnDownEncoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - output_scale_factor=1.0, - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - resnets = [] - attentions = [] - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - 
num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - Downsample2D( - in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" - ) - ] - ) - else: - self.downsamplers = None - - def forward(self, hidden_states): - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb=None) - hidden_states = attn(hidden_states) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class AttnSkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=np.sqrt(2.0), - downsample_padding=1, - add_downsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - self.attention_type = attention_type - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - self.attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class SkipDownBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - 
resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_downsample=True, - downsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - in_channels = in_channels if i == 0 else out_channels - self.resnets.append( - ResnetBlock2D( - in_channels=in_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(in_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - if add_downsample: - self.resnet_down = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - down=True, - kernel="fir", - ) - self.downsamplers = nn.ModuleList([FirDownsample2D(in_channels, out_channels=out_channels)]) - self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) - else: - self.resnet_down = None - self.downsamplers = None - self.skip_conv = None - - def forward(self, hidden_states, temb=None, skip_sample=None): - output_states = () - - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb) - output_states += (hidden_states,) - - if self.downsamplers is not None: - hidden_states = self.resnet_down(hidden_states, temb) - for downsampler in self.downsamplers: - skip_sample = downsampler(skip_sample) - - hidden_states = self.skip_conv(skip_sample) + hidden_states - - output_states += (hidden_states,) - - return hidden_states, output_states, skip_sample - - -class AttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attention_type="default", - attn_num_head_channels=1, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, 
hidden_states, res_hidden_states_tuple, temb=None): - for resnet, attn in zip(self.resnets, self.attentions): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - hidden_states = attn(hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class CrossAttnUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - prev_output_channel: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - cross_attention_dim=1280, - attention_type="default", - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - self.attention_type = attention_type - self.attn_num_head_channels = attn_num_head_channels - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - Transformer2DModel( - attn_num_head_channels, - out_channels // attn_num_head_channels, - in_channels=out_channels, - num_layers=1, - cross_attention_dim=cross_attention_dim, - norm_num_groups=resnet_groups, - ) - ) - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def set_attention_slice(self, slice_size): - if slice_size is not None and self.attn_num_head_channels % slice_size != 0: - raise ValueError( - f"Make sure slice_size {slice_size} is a divisor of " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - if slice_size is not None and slice_size > self.attn_num_head_channels: - raise ValueError( - f"Chunk_size {slice_size} has to be smaller or equal to " - f"the number of heads used in cross_attention {self.attn_num_head_channels}" - ) - - for attn in self.attentions: - attn._set_attention_slice(slice_size) - - self.gradient_checkpointing = False - - def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool): - for attn in self.attentions: - attn._set_use_memory_efficient_attention_xformers(use_memory_efficient_attention_xformers) - - def forward( - self, - hidden_states, - res_hidden_states_tuple, - temb=None, - encoder_hidden_states=None, - upsample_size=None, - ): - cross_attn_prob_list = list() - for layer_idx, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)): - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = 
torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module, return_dict=None): - def custom_forward(*inputs): - if return_dict is not None: - return module(*inputs, return_dict=return_dict) - else: - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states - )[0] - else: - hidden_states = resnet(hidden_states, temb) - tmp_hidden_states, cross_attn_prob = attn(hidden_states, encoder_hidden_states=encoder_hidden_states) - hidden_states = tmp_hidden_states.sample - cross_attn_prob_list.append(cross_attn_prob) - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states, cross_attn_prob_list - - -class UpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - self.gradient_checkpointing = False - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) - else: - hidden_states = resnet(hidden_states, temb) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states, upsample_size) - - return hidden_states - - -class UpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - - for i in range(num_layers): - input_channels = in_channels if i == 0 else 
out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states): - for resnet in self.resnets: - hidden_states = resnet(hidden_states, temb=None) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnUpDecoderBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - out_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - resnets = [] - attentions = [] - - for i in range(num_layers): - input_channels = in_channels if i == 0 else out_channels - - resnets.append( - ResnetBlock2D( - in_channels=input_channels, - out_channels=out_channels, - temb_channels=None, - eps=resnet_eps, - groups=resnet_groups, - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - norm_num_groups=resnet_groups, - ) - ) - - self.attentions = nn.ModuleList(attentions) - self.resnets = nn.ModuleList(resnets) - - if add_upsample: - self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) - else: - self.upsamplers = None - - def forward(self, hidden_states): - for resnet, attn in zip(self.resnets, self.attentions): - hidden_states = resnet(hidden_states, temb=None) - hidden_states = attn(hidden_states) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class AttnSkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - attn_num_head_channels=1, - attention_type="default", - output_scale_factor=np.sqrt(2.0), - upsample_padding=1, - add_upsample=True, - ): - super().__init__() - self.attentions = nn.ModuleList([]) - self.resnets = nn.ModuleList([]) - - self.attention_type = attention_type - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(resnet_in_channels + res_skip_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, 
- time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.attentions.append( - AttentionBlock( - out_channels, - num_head_channels=attn_num_head_channels, - rescale_output_factor=output_scale_factor, - eps=resnet_eps, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - - hidden_states = self.attentions[0](hidden_states) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample - - -class SkipUpBlock2D(nn.Module): - def __init__( - self, - in_channels: int, - prev_output_channel: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_time_scale_shift: str = "default", - resnet_act_fn: str = "swish", - resnet_pre_norm: bool = True, - output_scale_factor=np.sqrt(2.0), - add_upsample=True, - upsample_padding=1, - ): - super().__init__() - self.resnets = nn.ModuleList([]) - - for i in range(num_layers): - res_skip_channels = in_channels if (i == num_layers - 1) else out_channels - resnet_in_channels = prev_output_channel if i == 0 else out_channels - - self.resnets.append( - ResnetBlock2D( - in_channels=resnet_in_channels + res_skip_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min((resnet_in_channels + res_skip_channels) // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - ) - ) - - self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) - if add_upsample: - self.resnet_up = ResnetBlock2D( - in_channels=out_channels, - out_channels=out_channels, - temb_channels=temb_channels, - eps=resnet_eps, - groups=min(out_channels // 4, 32), - groups_out=min(out_channels // 4, 32), - dropout=dropout, - 
time_embedding_norm=resnet_time_scale_shift, - non_linearity=resnet_act_fn, - output_scale_factor=output_scale_factor, - pre_norm=resnet_pre_norm, - use_in_shortcut=True, - up=True, - kernel="fir", - ) - self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - self.skip_norm = torch.nn.GroupNorm( - num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True - ) - self.act = nn.SiLU() - else: - self.resnet_up = None - self.skip_conv = None - self.skip_norm = None - self.act = None - - def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None): - for resnet in self.resnets: - # pop res hidden states - res_hidden_states = res_hidden_states_tuple[-1] - res_hidden_states_tuple = res_hidden_states_tuple[:-1] - hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) - - hidden_states = resnet(hidden_states, temb) - - if skip_sample is not None: - skip_sample = self.upsampler(skip_sample) - else: - skip_sample = 0 - - if self.resnet_up is not None: - skip_sample_states = self.skip_norm(hidden_states) - skip_sample_states = self.act(skip_sample_states) - skip_sample_states = self.skip_conv(skip_sample_states) - - skip_sample = skip_sample + skip_sample_states - - hidden_states = self.resnet_up(hidden_states, temb) - - return hidden_states, skip_sample diff --git a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_3.sh b/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_3.sh deleted file mode 100644 index 98ded3543e9adfa8dcc34deece3013109b9967d8..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/examples/submit_example_3.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --mem=32g -#SBATCH --gres=gpu:rtx2080:1 -#SBATCH -c 3 -#SBATCH --output=example_3.out - -source activate mlfold - -path_to_PDB="../PDB_complexes/pdbs/3HTN.pdb" - -output_dir="../PDB_complexes/example_3_outputs" -if [ ! -d $output_dir ] -then - mkdir -p $output_dir -fi - -chains_to_design="A B" - -python ../protein_mpnn_run.py \ - --pdb_path $path_to_PDB \ - --pdb_path_chains "$chains_to_design" \ - --out_folder $output_dir \ - --num_seq_per_target 2 \ - --sampling_temp "0.1" \ - --batch_size 1 diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Descargar Guardian Tales Hack APK y disfrutar de gemas ilimitadas armas y niveles.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Descargar Guardian Tales Hack APK y disfrutar de gemas ilimitadas armas y niveles.md deleted file mode 100644 index 6dc688bc9b05363bb08eb99df29c7a1e38fcf339..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Descargar Guardian Tales Hack APK y disfrutar de gemas ilimitadas armas y niveles.md +++ /dev/null @@ -1,127 +0,0 @@ -
      -

      Descargar Guardian Tales Hack APK: ¿Qué es y cómo funciona?

      -

      Guardian Tales es un divertido y adictivo juego de rol y acción para dispositivos Android que te lleva a una aventura épica por un mundo fantástico lleno de personajes carismáticos, puzzles desafiantes y combates estratégicos. Pero si quieres disfrutar al máximo de este juego sin tener que gastar dinero real o esperar horas para obtener recursos y desbloquear niveles, quizás te interese saber cómo descargar y cómo funciona el hack APK de Guardian Tales.

      -

      descargar guardian tales hack apk


      Download Filehttps://ssurll.com/2uNQvr



      -

      En este artículo, te explicaremos qué es un hack APK, qué ofrece el hack APK de Guardian Tales, cómo descargarlo e instalarlo en tu dispositivo, y qué precauciones debes tomar para evitar problemas de seguridad o sanciones de los desarrolladores del juego. ¡Sigue leyendo y descubre cómo convertirte en un maestro de Guardian Tales con el hack APK!

      -

      ¿Qué es Guardian Tales?

      -

      Guardian Tales es un juego de rol y acción desarrollado por Kakao Games y lanzado en julio de 2020 para Android e iOS. El juego tiene una estética retro inspirada en los clásicos del género, como The Legend of Zelda, Final Fantasy o Chrono Trigger, pero con un toque de humor y parodia.

      -

      El juego te permite crear tu propio héroe o heroína y explorar más de 10 mundos diferentes, cada uno con su propia historia, ambientación y personajes. En cada mundo, tendrás que resolver puzzles, superar obstáculos, recolectar objetos y enfrentarte a enemigos y jefes usando un sistema de combate dinámico y táctico.

      -

      descargar guardian tales mod apk unlimited gems
      -descargar guardian tales hack apk platinmods
      -descargar guardian tales mod apk latest version
      -descargar guardian tales hack apk android
      -descargar guardian tales mod apk 2023
      -descargar guardian tales hack apk ios
      -descargar guardian tales mod apk offline
      -descargar guardian tales hack apk no root
      -descargar guardian tales mod apk mega
      -descargar guardian tales hack apk mediafire
      -descargar guardian tales mod apk español
      -descargar guardian tales hack apk sin verificacion
      -descargar guardian tales mod apk dinero infinito
      -descargar guardian tales hack apk 2.70.0
      -descargar guardian tales mod apk vip
      -descargar guardian tales hack apk obb
      -descargar guardian tales mod apk free shopping
      -descargar guardian tales hack apk revdl
      -descargar guardian tales mod apk god mode
      -descargar guardian tales hack apk an1
      -descargar guardian tales mod apk unlimited everything
      -descargar guardian tales hack apk happymod
      -descargar guardian tales mod apk 2.71.0
      -descargar guardian tales hack apk pure
      -descargar guardian tales mod apk one hit kill
      -descargar guardian tales hack apk rexdl
      -descargar guardian tales mod apk unlimited stamina
      -descargar guardian tales hack apk uptodown
      -descargar guardian tales mod apk unlimited coins
      -descargar guardian tales hack apk 2.72.0
      -descargar guardian tales mod apk high damage
      -descargar guardian tales hack apk andropalace
      -descargar guardian tales mod apk unlimited weapons
      -descargar guardian tales hack apk blackmod
      -descargar guardian tales mod apk unlimited levels
      -descargar guardian tales hack apk apkpure
      -descargar guardian tales mod apk unlimited cards
      -descargar guardian tales hack apk 2.73.0
      -descargar guardian tales mod apk no ads
      -descargar guardian tales hack apk android 1
      -descargar guardian tales mod apk all characters unlocked
      -descargar guardian tales hack apk 2.74.0
      -descargar guardian tales mod apk anti ban
      -descargar guardian tales hack apk mob.org
      -descargar guardian tales mod apk auto win
      -descargar guardian tales hack apk 2.75.0
      -descargar guardian tales mod apk all costumes unlocked
      -descargar guardian tales hack apk apkmody

      -

      Además, el juego cuenta con un modo multijugador online, donde podrás cooperar o competir con otros jugadores en diversas modalidades, como mazmorras, arenas o guerras de gremios. También podrás personalizar tu base, tu equipo y tus compañeros de aventura, eligiendo entre más de 50 héroes y 200 armas diferentes.

      -

      Guardian Tales es un juego gratuito, pero ofrece compras dentro de la aplicación para obtener gemas, la moneda premium del juego, que te permiten acceder a más contenidos, ventajas y recompensas. Sin embargo, si no quieres gastar dinero real o esperar horas para conseguir gemas, puedes optar por usar el hack APK de Guardian Tales.

      -

      ¿Qué es un hack APK?

      -

      Un APK (Android Package Kit) es un archivo que contiene todos los componentes necesarios para instalar una aplicación en un dispositivo Android. Normalmente, las aplicaciones se descargan e instalan desde la tienda oficial de Google Play, pero también se pueden obtener desde otras fuentes externas o alternativas.

      -

      Un hack APK es un archivo APK modificado o alterado por terceros para modificar o añadir características a la aplicación original. Estas modificaciones pueden ser desde cambios estéticos o funcionales hasta trucos o trampas que dan ventajas al usuario en el juego.

      -

      El uso de un hack APK puede tener beneficios como acceder a contenidos exclusivos o premium, obtener recursos ilimitados o desbloquear niveles o modos de juego. Sin embargo, también puede tener riesgos como infectar el dispositivo con malware, provocar errores o fallos en el juego o infringir las normas y políticas de los desarrolladores del juego.

      -

      ¿Qué ofrece el hack APK de Guardian Tales?

      -

      El hack APK de Guardian Tales es una versión modificada del juego original que ofrece varias características y ventajas al usuario. Algunas de estas características son:

      -
        -
      • Gemas ilimitadas: las gemas son la moneda premium del juego que se pueden usar para comprar armas, héroes, cofres, tickets y otros objetos. Con el hack APK de Guardian Tales, podrás obtener todas las gemas que quieras sin tener que pagar ni esperar.
      • -
      • Armas ilimitadas: las armas son los objetos que determinan el poder y la habilidad de tu héroe en el combate. Con el hack APK de Guardian Tales, podrás obtener todas las armas que quieras sin tener que gastar gemas ni recursos.
      • -
      • Héroes ilimitados: los héroes son los personajes que te acompañan en tu aventura y que tienen sus propias características y habilidades especiales. Con el hack APK de Guardian Tales, podrás obtener todos los héroes que quieras sin tener que gastar gemas ni recursos.
      • -
      • Niveles desbloqueados: los niveles son los escenarios donde se desarrolla la historia y la acción del juego. Con el hack APK de Guardian Tales, podrás acceder a todos los niveles que quieras sin tener que completar los anteriores ni cumplir con los requisitos.
      • -
      • Modo dios: el modo dios es una función que te hace invencible e invulnerable en el juego. Con el hack APK de Guardian Tales, podrás activar el modo dios cuando quieras y derrotar a todos tus enemigos sin esfuerzo.
      • -
      -

      Estas son solo algunas de las características que ofrece el hack APK de Guardian Tales, pero hay muchas más que podrás descubrir por ti mismo al descargarlo e instalarlo en tu dispositivo.

      -

      ¿Cómo descargar e instalar el hack APK de Guardian Tales?

      -

      Para descargar e instalar el hack APK de Guardian Tales, necesitas cumplir con algunos requisitos previos y seguir unos pasos sencillos. A continuación, te los explicamos:

      -

      Requisitos previos

      -
        -
      • Tener un dispositivo Android compatible con el juego. El juego requiere Android 4.4 o superior y al menos 2 GB de RAM y 1.5 GB de espacio libre.
      • -
      • Habilitar la opción de instalar aplicaciones de fuentes desconocidas. Esta opción se encuentra en los ajustes de seguridad de tu dispositivo y te permite instalar archivos APK que no provienen de Google Play.
      • -
      • Descargar el archivo APK del hack de Guardian Tales desde una fuente confiable. Hay muchos sitios web que ofrecen archivos APK modificados, pero no todos son seguros ni funcionan correctamente. Te recomendamos que uses APK Mirror, un sitio web que verifica y actualiza los archivos APK que publica.
      • -
      -

      Pasos para descargar e instalar el hack APK

      -
        -
      1. Accede al sitio web de APK Mirror desde tu navegador y busca el hack APK de Guardian Tales. Asegúrate de que el archivo APK corresponda a la última versión del juego y tenga una fecha reciente.
      2. -
      3. Descarga el archivo APK en tu dispositivo. Puede que te aparezca una advertencia sobre los riesgos de descargar archivos desconocidos, pero puedes ignorarla si confías en la fuente.
      4. -
      5. Localiza el archivo APK en la carpeta de descargas o en la ubicación que hayas elegido y ábrelo para iniciar la instalación. Puede que te aparezca otra advertencia sobre los permisos que requiere la aplicación, pero puedes aceptarlos si quieres usar el hack.
      6. -
      7. Espera a que se complete la instalación y luego abre el juego desde el icono que se habrá creado en tu pantalla de inicio o en tu menú de aplicaciones.
      8. -
      9. Disfruta del hack APK de Guardian Tales y sus ventajas.
      10. -
      -

      ¿Es seguro usar el hack APK de Guardian Tales?

      -

      El uso del hack APK de Guardian Tales puede tener algunos riesgos y desventajas que debes tener en cuenta antes de decidirte a usarlo. Algunos de estos riesgos son:

      -
        -
      • Malware: algunos archivos APK pueden contener virus, troyanos, spyware u otros tipos de software malicioso que pueden dañar tu dispositivo, robar tu información personal o comprometer tu seguridad. Por eso, es importante que verifiques el archivo APK antes de instalarlo y que uses un antivirus o un escáner de malware para proteger tu dispositivo.
      • -
      • Bans: los desarrolladores del juego pueden detectar el uso del hack APK y sancionarte por violar las normas y condiciones del juego. Estas sanciones pueden ir desde la suspensión temporal o permanente de tu cuenta hasta la eliminación de tus datos o progreso en el juego. Por eso, es importante que evites abusar del hack APK y que uses medidas para ocultar tu actividad y evitar ser detectado.
      • -
      • Errores: los archivos APK modificados pueden causar problemas de compatibilidad, estabilidad o funcionamiento en el juego o en el dispositivo. Estos problemas pueden manifestarse como cierres inesperados, pantallas congeladas, pérdida de datos o rendimiento deficiente. Por eso, es importante que actualices el archivo APK con frecuencia y que lo desinstales si te causa demasiados inconvenientes.
      • -
      -

      Cómo evitar el malware de los archivos APK

      -

      Para evitar el malware de los archivos APK, puedes seguir estos consejos y herramientas:

      -
        -
      • Descarga los archivos APK solo desde fuentes confiables y verificadas, como APK Mirror, que revisan y actualizan los archivos que publican.
      • -
      • Verifica el archivo APK antes de instalarlo usando un escáner online como Metadefender o VirusTotal, que analizan el archivo con varios motores antivirus y te muestran los resultados.
      • Instala un antivirus o un escáner de malware en tu dispositivo y ejecútalo periódicamente para detectar y eliminar cualquier amenaza potencial. -
      -

      Cómo evitar las sanciones de los desarrolladores del juego

      -

      Para evitar las sanciones de los desarrolladores del juego, puedes seguir estos consejos y precauciones:

      -
        -
      • Usa un VPN (Virtual Private Network) para ocultar tu dirección IP y tu ubicación, y así evitar que los desarrolladores del juego rastreen tu actividad y te identifiquen como un usuario del hack APK.
      • -
      • No abuses del hack APK ni lo uses para molestar o perjudicar a otros jugadores, ya que esto puede llamar la atención de los desarrolladores del juego y provocar quejas o reportes de otros usuarios.
      • -
      • No compartas tu cuenta ni tu información personal con nadie, ya que esto puede comprometer tu seguridad y tu privacidad, y facilitar que los desarrolladores del juego te detecten y te sancionen.
      • -
      • No te conectes al juego desde diferentes dispositivos o redes, ya que esto puede generar inconsistencias o sospechas en tu cuenta y alertar a los desarrolladores del juego.
      • -
      -

      Conclusión

      -

      El hack APK de Guardian Tales es una opción tentadora para los usuarios que quieren disfrutar al máximo de este juego sin tener que gastar dinero real o esperar horas para obtener recursos y desbloquear niveles. El hack APK ofrece varias características y ventajas, como gemas ilimitadas, armas ilimitadas, héroes ilimitados, niveles desbloqueados y modo dios.

      -

      Sin embargo, el uso del hack APK también implica algunos riesgos y desventajas, como malware, bans, errores y problemas legales o éticos. Por eso, es importante que el usuario sea consciente de estos riesgos y tome las medidas necesarias para evitarlos o minimizarlos.

      -

      En definitiva, el hack APK de Guardian Tales puede ser una herramienta útil y divertida para los usuarios que quieren experimentar el juego de una forma diferente y más fácil, pero también puede ser una fuente de problemas y frustraciones si no se usa con responsabilidad y precaución. Por lo tanto, el usuario debe decidir si vale la pena usar el hack APK o no en función de sus preferencias, objetivos y riesgos.

      -

      Preguntas frecuentes sobre el hack APK de Guardian Tales

      -

      A continuación, respondemos algunas de las preguntas más frecuentes sobre el hack APK de Guardian Tales:

      -

      ¿El hack APK de Guardian Tales funciona en todos los dispositivos Android?

      -

      No, el hack APK de Guardian Tales solo funciona en algunos dispositivos Android que cumplen con los requisitos mínimos del juego y que tienen habilitada la opción de instalar aplicaciones de fuentes desconocidas. Si tu dispositivo no cumple con estos requisitos o no tiene esta opción activada, no podrás usar el hack APK.

      -

      ¿El hack APK de Guardian Tales afecta el rendimiento del juego o del dispositivo?

      -

      Sí, el hack APK de Guardian Tales puede afectar el rendimiento del juego o del dispositivo, ya que puede consumir más recursos, causar errores, o interferir con otras aplicaciones. Es recomendable usar el hack APK con moderación y solo cuando sea necesario.

      -

      ¿El hack APK de Guardian Tales se actualiza con las nuevas versiones del juego?

      -

      No necesariamente, el hack APK de Guardian Tales puede quedar obsoleto o incompatible con las nuevas versiones del juego que se lanzan periódicamente. Es importante verificar la fecha y la versión del hack APK antes de descargarlo e instalarlo.

      -

      ¿El hack APK de Guardian Tales es legal?

      -

      No, el hack APK de Guardian Tales no es legal, ya que viola los términos y condiciones del juego y del servicio de Google Play. El uso del hack APK puede implicar consecuencias legales o éticas para el usuario.

      -

      ¿Hay otras formas de obtener ventajas en el juego sin usar el hack APK?

      -

      Sí, hay otras formas de obtener ventajas en el juego sin usar el hack APK, como seguir las guías y consejos de otros jugadores expertos, participar en eventos y misiones especiales, o comprar gemas y otros recursos con dinero real.

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Death Moto 3 MOD APK and Race with Fighting Bike Riders.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Death Moto 3 MOD APK and Race with Fighting Bike Riders.md deleted file mode 100644 index e9fc21e4ba6047e3fd9657892ee1d0aa3a80a085..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Death Moto 3 MOD APK and Race with Fighting Bike Riders.md +++ /dev/null @@ -1,95 +0,0 @@ - -

      Death Moto 3: Fighting Rider Mod APK - A Thrilling Racing Game

      -

      If you are a fan of racing games, you might have heard of Death Moto 3: Fighting Rider. This is a popular game that combines speed, action, and adventure in one exciting package. In this game, you will ride your motorcycle on various roads and terrains, while fighting against other riders and enemies. You will also be able to customize your motorcycle and equip it with different weapons, such as guns, rockets, and chainsaws. The game has stunning graphics and sound effects that will make you feel like you are in a real racing battle.

      -

      What is Death Moto 3: Fighting Rider?

      -

      Death Moto 3: Fighting Rider is a racing game developed by WEDO1.COM GAME. It is the third installment of the Death Moto series, which has been downloaded by millions of players worldwide. The game is available for both Android and iOS devices, and it is free to play. However, some items and features in the game require real money to purchase or unlock.

      -

      death moto 3 fighting rider mod apk


      Download Zip ===> https://ssurll.com/2uNYVA



      -

      Features of Death Moto 3: Fighting Rider

      -

      - Stunning graphics and sound effects

      -

      The game has amazing graphics that will make you feel immersed in the racing world. The game also has realistic sound effects that will enhance your gaming experience. You will hear the roar of your engine, the screech of your brakes, the blast of your weapons, and the screams of your enemies.

      -

      - Various motorcycles and weapons to choose from

      -

      The game offers a variety of motorcycles and weapons that you can use to customize your ride. You can choose from different models, colors, and styles of motorcycles, as well as different types of weapons, such as guns, rockets, chainsaws, and more. You can also upgrade your motorcycle and weapons to improve their performance and power.

      -

      - Challenging missions and enemies to face

      -

      The game has many missions that you can complete to earn coins and gems. These missions include racing against other riders, destroying enemy vehicles, collecting items, and more. You will also encounter different enemies on your way, such as zombies, robots, helicopters, tanks, and bosses. You will need to use your skills and weapons to defeat them.

      -

      - Online multiplayer mode and leaderboards

      -

      The game also has an online multiplayer mode where you can compete with other players from around the world. You can join or create a room and invite your friends or random players to join you. You can also chat with other players in the room and challenge them to a race. The game also has leaderboards where you can see your ranking among other players globally or locally.

      -

      What is Death Moto 3: Fighting Rider Mod APK?

      -

      Death Moto 3: Fighting Rider Mod APK is a modified version of the original game that gives you some extra benefits. With this mod apk, you will be able to enjoy the game without any limitations or restrictions.

      -

      Benefits of Death Moto 3: Fighting Rider Mod APK

      -

      - Unlimited coins and gems

      -

      With this mod apk, you will have unlimited coins and gems in your account. You can use these coins and gems to buy or unlock anything you want in the game, such as motorcycles, weapons, upgrades, and more. You will also be able to skip the ads that may interrupt your gameplay.

      -

      - All motorcycles and weapons unlocked

      -

      With this mod apk, you will have access to all the motorcycles and weapons in the game without having to complete any missions or pay any money. You can choose any motorcycle and weapon you like and enjoy their features and abilities.

      -

      - No ads and no root required

      -

      With this mod apk, you will not see any ads in the game that may annoy you or slow down your device. You will also not need to root your device to install this mod apk. You can simply download and install it without any hassle or risk.

      -

      death moto 3 mod apk unlimited money
      -death moto 3 fighting bike rider download
      -death moto 3 apk latest version
      -death moto 3 hack mod apk android
      -death moto 3 road rush racing game
      -death moto 3 mod apk revdl
      -death moto 3 fighting rider cheats
      -death moto 3 apk obb data
      -death moto 3 mod apk offline
      -death moto 3 fighting rider gameplay
      -death moto 3 mod apk rexdl
      -death moto 3 apk pure free
      -death moto 3 hack mod apk ios
      -death moto 3 fighting rider tips
      -death moto 3 apk mod menu
      -death moto 3 mod apk no ads
      -death moto 3 fighting bike rider review
      -death moto 3 apk uptodown
      -death moto 3 hack mod apk download
      -death moto 3 fighting rider online
      -death moto 3 mod apk unlimited gems
      -death moto 3 fighting bike rider hack
      -death moto 3 apk old version
      -death moto 3 mod apk happymod
      -death moto 3 fighting rider guide
      -death moto 3 mod apk unlimited coins
      -death moto 3 fighting bike rider modded
      -death moto 3 apk mirror link
      -death moto 3 mod apk android 1
      -death moto 3 fighting rider tricks
      -death moto 3 mod apk all unlocked
      -death moto 3 fighting bike rider update
      -death moto 3 apk full version
      -death moto 3 mod apk apkpure
      -death moto 3 fighting rider walkthrough
      -death moto 3 mod apk unlimited health
      -death moto 3 fighting bike rider cheats codes
      -death moto 3 apk for pc windows
      -death moto 3 mod apk free shopping
      -death moto 3 fighting rider levels

      -

      How to download and install Death Moto 3: Fighting Rider Mod APK?

      -

      If you want to download and install Death Moto 3: Fighting Rider Mod APK on your Android device, you can follow these simple steps:

      -

      Step-by-step guide for Android devices

      -

      - Enable unknown sources in your settings

      -

      Before you can install any mod apk file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and turn it on.

      -

      - Download the mod apk file from a trusted source

      -

      Next, you need to download the mod apk file from a trusted source. You can use the link below to download the latest version of Death Moto 3: Fighting Rider Mod APK. Make sure you have enough storage space on your device before downloading the file.

      -

      Download Death Moto 3: Fighting Rider Mod APK here
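Because the file does not come from an official store, it is worth checking it before you open it. The sketch below is one optional way to do that in Python: it computes the file's SHA-256 digest so you can compare it against a checksum published by the download site (if one is provided). Both the file name and the expected digest are placeholders, not values taken from this article.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholders: use your own file name and the checksum published by the site.
apk_path = "death_moto_3_mod.apk"
expected = "0000000000000000000000000000000000000000000000000000000000000000"

actual = sha256_of(apk_path)
print("SHA-256:", actual)
print("Checksum matches the published value." if actual == expected
      else "Checksum does NOT match - do not install this file.")
```

If the digests do not match, the download was corrupted or altered and should be deleted rather than installed.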

      -

      - Install the mod apk file and enjoy the game

      -

      Finally, you need to install the mod apk file on your device. To do this, locate the file in your downloads folder and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can launch the game and enjoy all the benefits of the mod apk.
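If you prefer to sideload from a computer instead of tapping the file on the phone, the same installation can be done over adb. This is only a sketch: it assumes the Android platform-tools (adb) are installed on the computer, USB debugging is enabled on the device, and the file name is a placeholder.

```python
import subprocess

apk_path = "death_moto_3_mod.apk"  # placeholder file name

# List connected devices first - the install will fail if none is authorized.
devices = subprocess.run(["adb", "devices"], capture_output=True, text=True)
print(devices.stdout)

# "-r" reinstalls over an existing copy and keeps the app's data.
result = subprocess.run(["adb", "install", "-r", apk_path],
                        capture_output=True, text=True)
print(result.stdout or result.stderr)
```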

      -

      Conclusion

      -

      Death Moto 3: Fighting Rider is a thrilling racing game that will keep you entertained for hours. You can ride your motorcycle on various roads and terrains, while fighting against other riders and enemies. You can also customize your motorcycle and equip it with different weapons, such as guns, rockets, chainsaws, and more. The game has stunning graphics and sound effects that will make you feel like you are in a real racing battle.

      -

      If you want to enjoy the game without any limitations or restrictions, you can download and install Death Moto 3: Fighting Rider Mod APK on your Android device. With this mod apk, you will have unlimited coins and gems, all motorcycles and weapons unlocked, no ads, and no root required. You can buy or unlock anything you want in the game, such as motorcycles, weapons, upgrades, and more. You can also compete with other players from around the world in the online multiplayer mode and see your ranking on the leaderboards.

      -

      So what are you waiting for? Download Death Moto 3: Fighting Rider Mod APK now and experience the thrill of racing and fighting on your motorcycle!

      -

      FAQs

      -

      Here are some frequently asked questions about Death Moto 3: Fighting Rider Mod APK:

Q: Is Death Moto 3: Fighting Rider Mod APK safe to use?

A: Yes, Death Moto 3: Fighting Rider Mod APK is safe to use as long as you download it from a trusted source. However, you should always be careful when installing any mod apk file on your device and make sure you have a backup of your data in case anything goes wrong.

Q: Do I need an internet connection to play Death Moto 3: Fighting Rider Mod APK?

A: No, you do not need an internet connection to play Death Moto 3: Fighting Rider Mod APK. You can play the game offline without any problem. However, if you want to play the online multiplayer mode or see the leaderboards, you will need an internet connection.

Q: How can I update Death Moto 3: Fighting Rider Mod APK?

A: Mod APKs are not updated through the Play Store, so to update Death Moto 3: Fighting Rider Mod APK you will need to download and install the latest version of the mod apk file from a trusted source. You can use the download link in this article to get the latest version of Death Moto 3: Fighting Rider Mod APK.

Q: Can I play Death Moto 3: Fighting Rider Mod APK on my PC?

A: Yes, you can play Death Moto 3: Fighting Rider Mod APK on your PC using an Android emulator. An Android emulator is software that lets you run Android apps on your PC. You can use any emulator of your choice, such as BlueStacks, NoxPlayer, or MEmu: download and install the emulator on your PC, install the mod apk file inside the emulator, and then launch the game.

Q: What are some alternatives to Death Moto 3: Fighting Rider Mod APK?

A: If you are looking for some alternatives to Death Moto 3: Fighting Rider Mod APK, you can try these games:
        -
      • Death Moto 4 Mod APK: This is the fourth installment of the Death Moto series, which has more motorcycles, weapons, and enemies to choose from.
      • -
      • Road Rash Mod APK: This is a classic racing game that lets you ride your motorcycle and fight against other riders using various weapons, such as clubs, chains, and bats.
      • -
      • Traffic Rider Mod APK: This is a realistic racing game that lets you ride your motorcycle on endless highways and dodge the traffic. You can also upgrade your motorcycle and unlock new modes and features.
      • -
      -

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 PPSSPP ISO Zip File (300MB) - The Ultimate Experience for GTA Fans.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 PPSSPP ISO Zip File (300MB) - The Ultimate Experience for GTA Fans.md deleted file mode 100644 index cc4fa91522620659d6554f21f78c458497bddc8d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download GTA 5 PPSSPP ISO Zip File (300MB) - The Ultimate Experience for GTA Fans.md +++ /dev/null @@ -1,123 +0,0 @@ - -

      How to Download GTA 5 Zip PPSSPP

      -

      GTA 5 is one of the most popular and acclaimed games of all time. It offers an open-world experience with a captivating story, diverse characters, and tons of activities. But what if you want to play GTA 5 on your mobile device or PC without installing the full game? That's where PPSSPP comes in. PPSSPP is a PSP emulator that allows you to run PSP games on various platforms, including Android and Windows. In this article, we will show you how to download GTA 5 zip ppsspp and enjoy this amazing game on your device.

      -

      What is GTA 5?

      -

      GTA 5 is the fifth installment in the Grand Theft Auto series, developed by Rockstar Games. It was released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. The game is set in Los Santos, a fictional city based on Los Angeles, and follows the lives of three protagonists: Michael, a retired bank robber; Franklin, a street hustler; and Trevor, a psychopathic criminal. The game allows you to switch between these characters at any time and explore the vast map, which includes urban areas, mountains, deserts, forests, and beaches. You can also engage in various missions, activities, side quests, and online multiplayer modes.

      -

      download gta 5 zip ppsspp


      Download Filehttps://ssurll.com/2uNVB3



      -

      GTA 5 is widely praised for its graphics, gameplay, story, soundtrack, and humor. It has won numerous awards and accolades, and has sold over 150 million copies worldwide. It is considered one of the best games ever made and a landmark in video game history.

      -

      What is PPSSPP?

      -

      PPSSPP is a PSP emulator that lets you play PSP games on different devices. PSP stands for PlayStation Portable, a handheld console that was released by Sony in 2004. PSP had a large library of games, ranging from action-adventure to sports to RPGs. However, PSP was discontinued in 2014, and many of its games are no longer available or compatible with modern devices.

      -

      That's where PPSSPP comes in handy. PPSSPP is an open-source project that was created by Henrik Rydgård in 2012. It allows you to run PSP games on your Android phone or tablet, Windows PC or laptop, Mac OS X computer, Linux system, iOS device, or even VR headset. PPSSPP can run your PSP games in HD resolution or higher, with improved graphics, sound, and performance. You can also save and load your game state at any point, customize your controls, use cheats and mods, and play online with other players.

      -

      How to Download GTA 5 Zip PPSSPP for Android

      -

      If you want to play GTA 5 on your Android device, you will need to download the PPSSPP emulator and the GTA 5 zip file. Here are the steps you need to follow:

      -
        -
      1. Download the PPSSPP emulator from the official website or the Google Play Store. Install it on your device and grant it the necessary permissions.
      2. -
      3. Download the GTA 5 zip file from a trusted source. You can find many websites that offer the GTA 5 zip file for PPSSPP, but be careful of fake or malicious links. You can use this link as an example, but make sure to scan it for viruses before downloading.
      4. -
5. Extract the zip file using a file manager app. You will need a file manager app that can handle zip files, such as ZArchiver or ES File Explorer. Locate the GTA 5 zip file on your device and tap on it, then choose the option to extract it to a folder of your choice (a scripted alternative is sketched just after this list).
      6. -
      7. Launch the PPSSPP emulator and locate the GTA 5 ISO file. Open the PPSSPP app and tap on the game icon. Navigate to the folder where you extracted the GTA 5 zip file and select the GTA 5 ISO file. The game will start loading on your device.
      8. -
      9. Enjoy playing GTA 5 on your Android device. You can now experience the thrilling adventures of Michael, Franklin, and Trevor on your mobile screen. You can also adjust the settings of the emulator to optimize the graphics and performance of the game.
      10. -
      -
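The extract-and-locate step can also be scripted if you prepare the archive on a computer first and then copy the result to the phone. The sketch below is only an illustration: the archive name and the output folder are placeholders, not official paths.

```python
import zipfile
from pathlib import Path

zip_path = Path("gta5_ppsspp.zip")     # placeholder: the downloaded archive
extract_dir = Path("GTA5_extracted")   # placeholder: any folder PPSSPP can browse to

extract_dir.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(zip_path) as archive:
    archive.extractall(extract_dir)

# PPSSPP loads the game from a disc image, so locate the .iso (or compressed .cso).
images = sorted(extract_dir.rglob("*.iso")) + sorted(extract_dir.rglob("*.cso"))
if images:
    print("Point PPSSPP at:", images[0])
else:
    print("No .iso/.cso image found - the archive may be incomplete or mislabeled.")
```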

      How to Download GTA 5 Zip PPSSPP for Windows

      -

      If you want to play GTA 5 on your Windows PC or laptop, you will need to download the PPSSPP emulator and the GTA 5 zip file. Here are the steps you need to follow:

      -

      download gta 5 ppsspp iso file for android
      -download gta 5 ppsspp highly compressed zip file
      -download gta 5 ppsspp gold zip file
      -download gta 5 ppsspp emulator zip file
      -download gta 5 ppsspp full game zip file
      -download gta 5 ppsspp mod zip file
      -download gta 5 ppsspp lite zip file
      -download gta 5 ppsspp apk + data zip file
      -download gta 5 ppsspp offline zip file
      -download gta 5 ppsspp latest version zip file
      -download gta 5 ppsspp android zip file free
      -download gta 5 ppsspp zip file for pc
      -download gta 5 ppsspp zip file for ios
      -download gta 5 ppsspp zip file for psp
      -download gta 5 ppsspp zip file for windows
      -download gta 5 ppsspp zip file for mac
      -download gta 5 ppsspp zip file for linux
      -download gta 5 ppsspp zip file no verification
      -download gta 5 ppsspp zip file no password
      -download gta 5 ppsspp zip file no survey
      -download gta 5 ppsspp zip file with cheats
      -download gta 5 ppsspp zip file with mediafire link
      -download gta 5 ppsspp zip file with google drive link
      -download gta 5 ppsspp zip file with mega link
      -download gta 5 ppsspp zip file with zarchiver
      -how to download gta 5 ppsspp zip file on android
      -how to download gta 5 ppsspp zip file on pc
      -how to download gta 5 ppsspp zip file on ios
      -how to download gta 5 ppsspp zip file on psp
      -how to download gta 5 ppsspp zip file on windows
      -how to install gta 5 ppsspp zip file on android
      -how to install gta 5 ppsspp zip file on pc
      -how to install gta 5 ppsspp zip file on ios
      -how to install gta 5 ppsspp zip file on psp
      -how to install gta 5 ppsspp zip file on windows
      -how to play gta 5 ppsspp zip file on android
      -how to play gta 5 ppsspp zip file on pc
      -how to play gta 5 ppsspp zip file on ios
      -how to play gta 5 ppsspp zip file on psp
      -how to play gta 5 ppsspp zip file on windows
      -where to download gta 5 ppsspp zip file for android
      -where to download gta 5 ppsspp zip file for pc
      -where to download gta 5 ppsspp zip file for ios
      -where to download gta 5 ppsspp zip file for psp
      -where to download gta 5 ppsspp zip file for windows
      -best site to download gta 5 ppsspp zip file for android
      -best site to download gta 5 ppsspp zip file for pc
      -best site to download gta 5 ppsspp zip file for ios
      -best site to download gta 5 ppsspp zip file for psp

      -
        -
      1. Download the PPSSPP emulator from the official website or Steam. Install it on your PC and run it as an administrator.
      2. -
      3. Download the GTA 5 zip file from a trusted source. You can find many websites that offer the GTA 5 zip file for PPSSPP, but be careful of fake or malicious links. You can use this link as an example, but make sure to scan it for viruses before downloading.
      4. -
      5. Extract the zip file using a file archiver program. You will need a file archiver program that can handle zip files, such as WinRAR or 7-Zip. Locate the GTA 5 zip file on your PC and right-click on it. Choose the option to extract it to a folder of your choice.
      6. -
      7. Launch the PPSSPP emulator and locate the GTA 5 ISO file. Open the PPSSPP app and click on the game icon. Navigate to the folder where you extracted the GTA 5 zip file and select the GTA 5 ISO file. The game will start loading on your PC.
      8. -
      9. Enjoy playing GTA 5 on your Windows PC. You can now experience the thrilling adventures of Michael, Franklin, and Trevor on your big screen. You can also adjust the settings of the emulator to optimize the graphics and performance of the game.
      10. -
      -

      Tips and Tricks for Playing GTA 5 on PPSSPP

      -

      GTA 5 is a demanding game that requires a lot of resources to run smoothly. If you want to play GTA 5 on PPSSPP without any lag or glitches, you will need to tweak some settings of the emulator and the game. Here are some tips and tricks that can help you improve your gaming experience:

      -
        -
• To optimize the graphics settings of PPSSPP, go to Settings > Graphics and change these options (a configuration-file sketch of the same idea follows after this list):
          -
        • Set Rendering Resolution to Auto (1:1)
        • -
        • Set Display Resolution (HW scaler) to Native Device Resolution
        • -
        • Set Mode to Buffered Rendering
        • -
        • Set Frame Skipping to Off or Auto
        • -
        • Set Texture Scaling Level to Auto
        • -
        • Set Texture Filtering Mode to Linear or Anisotropic Filtering
        • -
        • Set Anisotropic Filtering Level to 16x or 8x
        • -
        • Set Texture Scaling Type to xBRZ
        • -
        • Set Spline/Bezier Curves Quality to High
        • -
        • Enable Hardware Transform, Software Skinning, Vertex Cache, Lazy Texture Caching, and Retain Changed Textures
        • -
        • Disable Mipmapping, Low Quality Splines and Bezier Curves, and Timer Hack
        • -
        -
      • To optimize the performance settings of PPSSPP, go to Settings > System and change these options:
          -
        • Set Emulation Speed to Unlimited or Auto
        • -
        • Set Alternative Speed to Unlimited or Auto
        • -
        • Set I/O Timing Method to Fast or Host
        • -
        • Set CPU Clock to 0 or Auto
        • -
        • Enable Fast Memory, Multithreaded, I/O on Thread, and Force Real Clock Sync
        • -
        • Disable Atomic Audio Locks and Change Emulated PSP's CPU Clock
        • -
      • To use cheats and mods in GTA 5 on PPSSPP, you will need to download the cheat database file and the mod files from the internet. You can find many websites that offer cheats and mods for GTA 5 on PPSSPP, but be careful of fake or malicious links. You can use this link as an example, but make sure to scan it for viruses before downloading. -
      • To install the cheat database file, copy it to the PSP/Cheats folder on your device or PC. Then, launch the PPSSPP emulator and go to Settings > Tools > Cheats. Enable Cheats and select Edit Cheat File. You will see a list of cheats for GTA 5 that you can activate or deactivate.
      • -
      • To install the mod files, copy them to the PSP/Game folder on your device or PC. Then, launch the PPSSPP emulator and locate the GTA 5 ISO file. Select it and press Triangle. You will see a list of mods for GTA 5 that you can enable or disable.
      • -
      • To use cheats and mods in GTA 5 on PPSSPP, you will need to restart the game after activating or deactivating them. You can also use the cheat menu or the mod menu in the game to access more options and features.
      • -
      -

      Conclusion

      -

      GTA 5 is a masterpiece of gaming that you can enjoy on your device with the help of PPSSPP. PPSSPP is a PSP emulator that allows you to run PSP games on various platforms, including Android and Windows. In this article, we showed you how to download GTA 5 zip ppsspp and play it on your device. We also gave you some tips and tricks for optimizing the graphics and performance of the game, as well as using cheats and mods in GTA 5 on PPSSPP.

      -

      We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. And if you liked this article, please share it with your friends and family who might be interested in playing GTA 5 on PPSSPP. Thank you for reading!

      -

      Frequently Asked Questions

      -

      Here are some of the most common questions that people ask about GTA 5 on PPSSPP:

      -
        -
      1. Is GTA 5 on PPSSPP legal?
      2. -

        GTA 5 on PPSSPP is not an official release by Rockstar Games or Sony. It is a fan-made modification that requires you to download a zip file from the internet. Therefore, it is not legal to play GTA 5 on PPSSPP without owning a copy of the original game for PSP. However, if you already own a copy of GTA 5 for PSP, you can legally play it on PPSSPP as a backup.

        -
      3. Is GTA 5 on PPSSPP safe?
      4. -

        GTA 5 on PPSSPP is not guaranteed to be safe or secure. It depends on where you download the zip file from and whether it contains any viruses or malware. You should always scan any file that you download from the internet before opening it. You should also be careful of any links that claim to offer GTA 5 zip file for PPSSPP, as they might be fake or malicious.

        -
      5. Is GTA 5 on PPSSPP compatible with my device?
6. -

  GTA 5 on PPSSPP is compatible with most devices that can run the PPSSPP emulator. However, GTA 5 is a very demanding game that requires a lot of resources to run smoothly. You will need a device with at least 2 GB of RAM, 4 GB of free storage space, and a decent processor and GPU. You will also need to adjust the settings of the emulator and the game to optimize the graphics and performance of GTA 5 on PPSSPP.

        -
      7. How can I update GTA 5 on PPSSPP?
      8. -

        GTA 5 on PPSSPP is not an official release by Rockstar Games or Sony. It is a fan-made modification that does not receive any updates or patches from the developers. Therefore, you cannot update GTA 5 on PPSSPP like you would update a normal game. However, you can check for any new versions or improvements of the zip file from the internet and download them if you want to try them out.

        -
      9. How can I play GTA 5 online on PPSSPP?
      10. -

        GTA 5 on PPSSPP does not support online multiplayer mode. It only supports single-player mode. However, you can play online with other players who are using PPSSPP emulator by using the ad hoc network feature. To do this, you will need to connect your device or PC to the same Wi-Fi network as the other players. Then, go to Settings > Networking and enable WLAN and Ad hoc server. You will also need to enter the same IP address as the other players. Then, launch GTA 5 on PPSSPP and select the multiplayer option in the game menu.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Slots for Fun - Explore Thousands of Online Slot Games at Casino.org..md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Slots for Fun - Explore Thousands of Online Slot Games at Casino.org..md deleted file mode 100644 index 68ecd2d51bf2dc20707f80cf06ffeb799edf665a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Slots for Fun - Explore Thousands of Online Slot Games at Casino.org..md +++ /dev/null @@ -1,116 +0,0 @@ - -

      Slot Free Games: How to Play and Win Online

      -

      Are you looking for a fun and exciting way to spend your free time? Do you love the thrill of spinning the reels and winning big prizes? If so, then you should try playing slot free games online. Slot free games are online casino games that let you play slots without risking any money. You can enjoy hundreds of different themes, features, and styles of slot games without spending a dime. In this article, we will tell you everything you need to know about slot free games, including what they are, why you should play them, how to find the best ones, what types and features they offer, and some tips and tricks for playing them. Read on and discover the amazing world of slot free games online.

      -

      slot free games


      Download Ziphttps://ssurll.com/2uNVXt



      -

      Introduction

      -

      What are slot free games?

      -

      Slot free games are online casino games that allow you to play slots for fun without wagering any money. They are also known as demo slots, free slots, or practice slots. They work exactly like real money slots, except that you use virtual coins instead of real cash. You can still win prizes, but they are not withdrawable. Slot free games are designed to give you a taste of what real money slots are like, without risking your own funds.

      -

      Why play slot free games?

      -

      There are many reasons why you might want to play slot free games online. Here are some of the most common ones:

      -
        -
      • You can have fun and entertainment without spending any money.
      • -
      • You can try out new games and see if you like them before playing for real money.
      • -
      • You can learn how the games work and what features they have without risking any losses.
      • -
      • You can practice your skills and strategies without worrying about the outcome.
      • -
      • You can enjoy the variety and diversity of slot themes, graphics, and sounds.
      • -
      -

      How to find the best slot free games?

      -

      There are thousands of slot free games available online, but not all of them are worth your time and attention. To find the best ones, you need to consider some factors, such as:

      -

      slot free games online
      -slot free games casino
      -slot free games no download
      -slot free games bonus
      -slot free games for fun
      -slot free games with jackpots
      -slot free games cleopatra
      -slot free games 777
      -slot free games wheel of fortune
      -slot free games vegas
      -slot free games machines
      -slot free games download
      -slot free games app
      -slot free games android
      -slot free games iphone
      -slot free games facebook
      -slot free games win real money
      -slot free games no deposit
      -slot free games no registration
      -slot free games no internet
      -slot free games offline
      -slot free games wizard of oz
      -slot free games quick hit
      -slot free games buffalo
      -slot free games monopoly
      -slot free games davinci diamonds
      -slot free games zeus
      -slot free games wolf run
      -slot free games lobstermania
      -slot free games triple diamond
      -slot free games golden goddess
      -slot free games cats
      -slot free games china shores
      -slot free games blazing 7s
      -slot free games hot shot
      -slot free games double down
      -slot free games caesars casino
      -slot free games house of fun
      -slot free games zynga
      -slot free games aristocrat
      -slot free games bally
      -slot free games igt
      -slot free games wms
      -slot free games konami
      -slot free games netent
      -slot free games microgaming
      -slot free games playtech
      -slot free games novomatic
      -slot free games betsoft

      -
        -
      • The quality and reputation of the software provider. You want to play games from reliable and trustworthy developers who create high-quality and fair games.
      • -
      • The theme and style of the game. You want to play games that suit your preferences and interests. Whether you like classic fruit machines, modern video slots, or anything in between, you can find it online.
      • -
      • The features and bonuses of the game. You want to play games that offer exciting and rewarding features, such as wilds, scatters, bonus rounds, free spins, multipliers, jackpots, etc.
      • -
      • The RTP and volatility of the game. You want to play games that have a high return to player (RTP) percentage and a low or medium volatility level. This means that the game will pay out more frequently and consistently.
      • -
      -

      One of the easiest ways to find the best slot free games is to visit reputable online casino sites that offer them. You can browse through their selection of games and choose the ones that appeal to you. You can also read reviews and ratings from other players to get an idea of what to expect from each game.

      -

      Types of slot free games

      -

      Classic slots

      -

      Classic slots are the simplest and most traditional type of slot games. They usually have three reels and one payline, although some may have more. They are inspired by the old-fashioned slot machines that you can find in land-based casinos. They usually feature symbols such as fruits, bars, bells, sevens, and stars. They are easy to play and understand, and they offer a nostalgic and retro feel. Some examples of classic slots are Triple Diamond, Fire Joker, and Break da Bank.

      -

      Video slots

      -

      Video slots are the most popular and common type of slot games online. They usually have five reels and multiple paylines, although some may have more or less. They are powered by advanced graphics and animations, and they offer a wide range of themes and features. You can find video slots based on movies, TV shows, music, sports, animals, fantasy, adventure, and more. They often include special symbols such as wilds and scatters, and bonus rounds such as free spins and mini-games. Some examples of video slots are Starburst, Gonzo's Quest, and Book of Dead.

      -

      Progressive slots

      -

      Progressive slots are a type of slot games that offer a chance to win a huge jackpot that keeps growing with every bet. They are connected to a network of other slots that share the same jackpot pool. A small percentage of each bet goes into the jackpot, which can reach millions of dollars. To win the jackpot, you usually need to land a specific combination of symbols or trigger a bonus feature. Some examples of progressive slots are Mega Moolah, Mega Fortune, and Jackpot Giant.

      -

      Megaways slots

      -

      Megaways slots are a type of slot games that use a unique mechanic that changes the number of symbols and ways to win on each spin. They are developed by Big Time Gaming and licensed to other providers. They usually have six reels and up to seven symbols per reel, which can create up to 117,649 ways to win. They also feature cascading reels, which means that winning symbols are replaced by new ones, creating more chances to win. Some examples of Megaways slots are Bonanza, Extra Chilli, and Monopoly Megaways.
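The 117,649 figure is simply seven symbols on each of six reels: 7^6 = 117,649. The tiny sketch below shows that arithmetic, plus a hypothetical spin (the symbol counts are made up for illustration) to show how the number of ways changes from spin to spin.

```python
from math import prod

# Maximum configuration: seven symbols on each of six reels.
print(7 ** 6)                       # 117649, the advertised maximum number of ways

# On any single spin each reel lands a random number of symbols, and the ways
# to win is the product across the reels. A hypothetical spin for illustration:
example_spin = [3, 5, 7, 2, 6, 4]   # symbols showing on reels 1-6
print(prod(example_spin))           # 5040 ways on this particular spin
```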

      -

      Features of slot free games

      -

      Wilds and scatters

      -

      Wilds and scatters are special symbols that can enhance your gameplay and winnings. Wilds can substitute for other symbols (except scatters) to help you form winning combinations. Scatters can trigger bonus features (such as free spins) or pay out regardless of their position on the reels.

      -

      Bonus rounds and free spins

      -

      Bonus rounds and free spins are extra features that can be activated by landing certain symbols or combinations. Bonus rounds are mini-games that can vary depending on the theme and style of the slot. They can involve picking items, spinning wheels, matching cards, etc. Free spins are spins that you don't have to pay for, but can still win prizes from. They often come with additional features such as multipliers, expanding wilds, sticky wilds, etc.

      -

      Multipliers and jackpots

      -

      Multipliers and jackpots are features that can boost your payouts significantly. Multipliers can increase your winnings by a certain factor (such as 2x, 5x, 10x, etc.). They can apply to your base game wins or your bonus game wins. Jackpots are fixed or progressive prizes that can be won by landing specific symbols or triggering certain features.

      -

      RTP and volatility

      -

      RTP and volatility are two important factors that affect your chances of winning and losing in slot games. RTP stands for return to player and it is the percentage of money that is paid back to players over time. The higher the RTP, the more money you can expect to get back in the long run. Volatility refers to how often and how much you can win or lose in a slot game. The higher the volatility, the bigger but rarer the wins. The lower the volatility, the smaller but more frequent the wins.
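A toy simulation can make these two numbers concrete. The paytable below is invented purely for illustration (it is not taken from any real slot): it pays back 96% of wagers on average, and running many sessions shows the long-run return settling near that RTP while the spread between sessions is a rough picture of volatility.

```python
import random
import statistics

# A made-up paytable for a 1-credit spin: (prize, probability) pairs.
# Theoretical RTP = 0*0.90 + 5*0.08 + 20*0.018 + 100*0.002 = 0.96 (96%).
paytable = [(0, 0.90), (5, 0.08), (20, 0.018), (100, 0.002)]
prizes, weights = zip(*paytable)

def session(spins: int = 10_000) -> float:
    """Return the fraction of wagered credits paid back over one session."""
    paid = sum(random.choices(prizes, weights=weights, k=spins))
    return paid / spins

results = [session() for _ in range(200)]
print(f"mean return  : {statistics.mean(results):.3f}  (theoretical RTP 0.96)")
print(f"std deviation: {statistics.stdev(results):.3f}  (a rough volatility measure)")
```

Swapping in a paytable with rarer but larger prizes (while keeping the same 0.96 expected value) raises the standard deviation, which is exactly what a "high volatility" slot feels like in play.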

      -

      Tips and tricks for playing slot free games

      -

      Choose your game wisely

      -

      One of the most important tips for playing slot free games is to choose a game that suits your preferences and goals. You should consider the theme, the features, the RTP, the volatility, and the jackpot of the game. You should also read the rules and the paytable of the game to understand how it works and what you can win. You can also check the reviews and ratings of other players to see their feedback and opinions.

      -

      Set a budget and stick to it

      -

      Another important tip for playing slot free games is to set a budget and stick to it. Even though you are not playing with real money, you should still be responsible and disciplined with your virtual coins. You should decide how much you are willing to spend and lose, and never go beyond your limit. You should also keep track of your wins and losses, and quit while you are ahead or before you lose too much.

      -

      Practice before you play for real money

      -

      A third important tip for playing slot free games is to practice before you play for real money. Slot free games are a great way to learn the ropes and improve your skills without risking anything. You can try out different games, features, and strategies, and see what works best for you. You can also get familiar with the rules and the payouts of each game, and prepare yourself for the real deal. Once you feel confident and ready, you can switch to real money slots and play for real prizes.

      -

      Have fun and enjoy the experience

      -

      A fourth important tip for playing slot free games is to have fun and enjoy the experience. Slot free games are meant to be entertaining and relaxing, not stressful or frustrating. You should not take them too seriously or get too attached to the outcome. You should also not chase your losses or get greedy with your wins. You should just enjoy the thrill of spinning the reels and winning some virtual coins, and appreciate the variety and quality of slot games online.

      -

      Conclusion

      -

Slot free games are online casino games that let you play slots for fun without wagering any money. They offer many benefits, such as entertainment, variety, learning, and practice, and they come in many types, including classic slots, video slots, progressive slots, and Megaways slots. They also offer features such as wilds, scatters, bonus rounds, free spins, multipliers, and jackpots, with their long-term behaviour described by RTP and volatility. To play slot free games online, you need to choose a reputable online casino site that offers them and pick a game that suits your preferences and goals. You also need to set a budget and stick to it, practice before you play for real money, and have fun and enjoy the experience.

      -

      FAQs

      -

      What are the best slot free games online?

      -

      The best slot free games online are the ones that match your tastes and expectations. However, some of the most popular and recommended ones are Starburst, Gonzo's Quest, Book of Dead, Mega Moolah, Bonanza, etc.

      -

      Can I win real money from slot free games?

      -

No, you cannot win real money from slot free games. They are only for fun and entertainment purposes. However, some online casinos offer no deposit bonuses or free spins that let you try real-money slots without risking your own funds.

      -

      Are slot free games fair and random?

      -

      Yes, slot free games are fair and random. They use a random number generator (RNG) that ensures that each spin is independent and unpredictable. They also have a fixed RTP that indicates how much they pay back over time.
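To make "independent and unpredictable" concrete, here is a tiny conceptual sketch of an RNG-driven spin in Python. The symbol set and the use of Python's built-in pseudo-random generator are assumptions for illustration only; certified casino RNGs and real symbol weightings work differently in detail.

```python
import random

SYMBOLS = ["cherry", "bell", "seven", "bar", "star"]  # hypothetical symbols

def spin_reels(num_reels=3):
    # Each reel position is drawn independently of every other draw,
    # so earlier spins have no influence on later ones.
    return [random.choice(SYMBOLS) for _ in range(num_reels)]

for _ in range(3):
    print(spin_reels())
```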

      -

      How can I improve my chances of winning in slot free games?

      -

      You cannot influence the outcome of slot free games, as they are based on luck and chance. However, you can improve your chances of winning by choosing games with a high RTP and a low or medium volatility level, and by managing your budget and playing responsibly. You can also practice and learn the rules and features of each game before playing for real money.

      -

      Where can I play slot free games online?

      -

      You can play slot free games online at many reputable and trustworthy online casino sites. Some of the best ones are 888 Casino, LeoVegas, Casumo, Betway, etc. You can also play slot free games on your mobile devices, as most online casinos are compatible with iOS and Android platforms.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Unlimited Resources and Features with RAID Shadow Legends Private Servers Mod Apk.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Unlimited Resources and Features with RAID Shadow Legends Private Servers Mod Apk.md deleted file mode 100644 index ddbd4948c8266aab2523236c4585eadc80470ee3..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Get Unlimited Resources and Features with RAID Shadow Legends Private Servers Mod Apk.md +++ /dev/null @@ -1,110 +0,0 @@ - -

      Raid Shadow Legends Mod Apk Private Server: Everything You Need to Know

      -

      If you are a fan of fantasy RPG games, you might have heard of Raid Shadow Legends. This is one of the most popular and immersive games in the genre, with stunning graphics, epic battles, and hundreds of champions to collect and customize. However, if you want to experience the game to its fullest potential, you might want to try a mod apk private server. In this article, we will explain what a mod apk private server is, why you should use it for Raid Shadow Legends, and how to download and install it on your device.

      -

      raid shadow legends mod apk private server


Download File: https://ssurll.com/2uNXcs



      -

      What is Raid Shadow Legends?

      -

Raid Shadow Legends is a free-to-play mobile game that was released in 2018 by Plarium Games. It is a turn-based fantasy RPG that lets you explore a vast world, fight against enemies, and collect champions from 16 different factions. You can also join clans, participate in tournaments, and challenge other players in PvP battles. The game has over 500 champions with unique skills and abilities, as well as different artifacts and gear that you can equip them with. The game also features a rich story mode, where you can uncover the secrets of Teleria and face the Dark Lord Siroth.

      -

      What is a mod apk?

      -

A mod apk is a version of an original app that has been altered by third-party developers to add or change some features. A mod apk can offer various benefits, such as unlocking premium content, removing ads, bypassing restrictions, or enhancing performance. However, not all mod apks are safe or legal, so you need to be careful when downloading them from unknown sources. Some mod apks may contain malware or viruses that can harm your device or compromise your data. Some mod apks may also violate the terms of service of the original app, which can result in bans or penalties.

      -

      What is a private server?

      -

      A private server is a server that is not hosted by the official developers of an app or game, but by independent individuals or groups. A private server can offer different features or gameplay than the official server, such as custom maps, modes, events, or items. A private server can also have fewer players or more resources than the official server, which can make the game easier or more fun. However, not all private servers are stable or secure, so you need to be careful when joining them. Some private servers may have bugs or glitches that can affect your game experience. Some private servers may also be shut down or deleted without notice.

      -

      Why use a mod apk private server for Raid Shadow Legends?

      -

      If you love playing Raid Shadow Legends, but you want to have more freedom and fun in the game, you might want to try a mod apk private server. A mod apk private server can offer many advantages over the official version of the game, such as:

      -

      Unlimited resources and gems

      -

      One of the main challenges of playing Raid Shadow Legends is managing your resources and gems, which are essential for upgrading your champions, buying artifacts, and accessing various features. However, with a mod apk private server, you can get unlimited resources and gems for free, without spending any real money or waiting for hours. This way, you can enjoy the game without any limitations or frustrations.

      -

      Access to all champions and factions

      -

      Another challenge of playing Raid Shadow Legends is collecting and unlocking all the champions and factions in the game. There are over 500 champions in the game, each with their own rarity, affinity, role, and faction. Some of them are very hard to obtain or require special events or missions. However, with a mod apk private server, you can access all the champions and factions in the game, without any restrictions or requirements. You can choose any champion you want and experiment with different combinations and strategies.

      -

      raid shadow legends mod apk private server download
      -raid shadow legends mod apk private server ios
      -raid shadow legends mod apk private server 2021
      -raid shadow legends mod apk private server unlimited gems
      -raid shadow legends mod apk private server latest version
      -raid shadow legends mod apk private server offline
      -raid shadow legends mod apk private server android
      -raid shadow legends mod apk private server no root
      -raid shadow legends mod apk private server free
      -raid shadow legends mod apk private server hack
      -raid shadow legends mod apk private server online
      -raid shadow legends mod apk private server reddit
      -raid shadow legends mod apk private server discord
      -raid shadow legends mod apk private server gameplay
      -raid shadow legends mod apk private server review
      -raid shadow legends mod apk private server link
      -raid shadow legends mod apk private server update
      -raid shadow legends mod apk private server install
      -raid shadow legends mod apk private server features
      -raid shadow legends mod apk private server guide
      -raid shadow legends mod apk private server cheats
      -raid shadow legends mod apk private server codes
      -raid shadow legends mod apk private server tutorial
      -raid shadow legends mod apk private server tips
      -raid shadow legends mod apk private server tricks
      -raid shadow legends mod apk private server best champions
      -raid shadow legends mod apk private server tier list
      -raid shadow legends mod apk private server clans
      -raid shadow legends mod apk private server factions
      -raid shadow legends mod apk private server dungeons
      -raid shadow legends mod apk private server arena
      -raid shadow legends mod apk private server campaign
      -raid shadow legends mod apk private server fusion
      -raid shadow legends mod apk private server events
      -raid shadow legends mod apk private server quests
      -raid shadow legends mod apk private server rewards
      -raid shadow legends mod apk private server support
      -raid shadow legends mod apk private server bugs
      -raid shadow legends mod apk private server issues
      -raid shadow legends mod apk private server fixes
      -raid shadow legends mod apk private server comparison
      -raid shadow legends mod apk private server benefits
      -raid shadow legends mod apk private server drawbacks
      -raid shadow legends mod apk private server alternatives
      -raid shadow legends mod apk private server fun facts
      -raid shadow legends mod apk private server memes
      -raid shadow legends mod apk private server videos
      -raid shadow legends mod apk private server screenshots
      -raid shadow legends mod apk private server ratings

      -

      Customization and personalization options

      -

      One of the fun aspects of playing Raid Shadow Legends is customizing and personalizing your champions and your account. You can equip your champions with different artifacts and gear, change their appearance and name, and create your own avatar and banner. However, some of these options are limited or costly in the official version of the game. With a mod apk private server, you can have more customization and personalization options for free, such as changing the skin color, hair style, or outfit of your champions, or creating your own unique items and effects.

      -

      Faster progress and leveling up

      -

      One of the goals of playing Raid Shadow Legends is progressing and leveling up in the game. You can increase your level by completing quests, missions, challenges, and battles. You can also improve your champions by leveling them up, ascending them, ranking them up, and mastering them. However, this process can be very slow and tedious in the official version of the game. With a mod apk private server, you can speed up your progress and leveling up in the game, by getting more experience points, rewards, and bonuses. You can also skip some of the boring or repetitive tasks and focus on the fun parts of the game.

      -

      No ads or in-app purchases

      -

      One of the drawbacks of playing Raid Shadow Legends is dealing with ads or in-app purchases. The game is free to play, but it has some features that require real money or watching ads to access. For example, you need to buy gems or energy to use some functions or modes in the game. You also need to watch ads to get some extra rewards or benefits. However, with a mod apk private server, you can avoid ads or in-app purchases completely. You can enjoy the game without any interruptions or distractions.

      -

      How to download and install a mod apk private server for Raid Shadow Legends?

      -

      If you are interested in trying a mod apk private server for Raid Shadow Legends, you need to follow these steps:

      -

      Find a reliable source for the mod apk file

      -

      The first step is to find a reliable source for the mod apk file that you want to download. There are many websites that offer mod apk files for various games and apps, but not all of them are trustworthy or safe. Some of them may have fake or outdated files that do not work or cause problems. Some of them may also have malicious files that can infect your device or steal your data. Therefore, you need to do some research and check the reviews and ratings of the website before downloading anything from it. You can also use antivirus software or VPN services to protect yourself from potential threats.

      -

      Enable unknown sources on your device settings

      -

      The next step is to enable unknown sources on your device settings. This is necessary because most devices do not allow installing apps from sources other than the official app store by default. To enable unknown sources, you need to go to your device settings, then security or privacy settings, then find the option that says unknown sources or allow installation from unknown sources. You need to turn on this option and confirm it when prompted.

      -

      Download and install the mod apk file

      -

      The final step is to download and install the mod apk file on your device. To do this, you need to go to the website where you found the mod apk file and click on the download button. You may need to wait for a few seconds or minutes until the download is complete. Then, you need to go to your device storage or file manager and find the downloaded file. You need to tap on it and follow the instructions on the screen to install it on your device.

      -

      Launch the game and enjoy the private server features

      -

      The last step is to launch the game and enjoy the private server features. To do this, you need to go to your device home screen or app drawer and find the icon of Raid Shadow Legends. You need to tap on it and open the game. You may need to allow some permissions or accept some terms and conditions before you can start playing. Once you are in the game, you can enjoy the private server features that we mentioned earlier, such as unlimited resources and gems, access to all champions and factions, customization and personalization options, faster progress and leveling up, and no ads or in-app purchases. You can also join or create your own clan, chat with other players, and participate in various events and modes.

      -

      Conclusion

      -

      Raid Shadow Legends is a fantastic fantasy RPG game that offers a lot of fun and excitement for its players. However, if you want to have more freedom and fun in the game, you might want to try a mod apk private server. A mod apk private server can give you many benefits, such as unlimited resources and gems, access to all champions and factions, customization and personalization options, faster progress and leveling up, and no ads or in-app purchases. To use a mod apk private server for Raid Shadow Legends, you need to find a reliable source for the mod apk file, enable unknown sources on your device settings, download and install the mod apk file, and launch the game and enjoy the private server features. We hope this article has helped you understand what a mod apk private server is, why you should use it for Raid Shadow Legends, and how to download and install it on your device. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

      -

      FAQs

      -

      Here are some frequently asked questions about Raid Shadow Legends mod apk private server:

      -

      Is using a mod apk private server for Raid Shadow Legends safe?

      -

      Using a mod apk private server for Raid Shadow Legends is not completely safe, as there are some risks involved. Some of the risks are:

      -
        -
• You may download a fake or malicious mod apk file that can harm your device or compromise your data.
• You may violate the terms of service of the original game, which can result in bans or penalties.
• You may lose your progress or data if the private server is shut down or deleted.
• You may encounter bugs or glitches that can affect your game experience.
      -

To minimize these risks, you should always download the mod apk file from a reliable source, use antivirus software or VPN services to protect yourself from potential threats, back up your data regularly, and avoid using your main account or device for the mod apk private server.

      -

      Is using a mod apk private server for Raid Shadow Legends legal?

      -

      Using a mod apk private server for Raid Shadow Legends is not legal, as it violates the intellectual property rights of the original developers of the game. Modifying or distributing an app or game without the permission of the developers is considered piracy or hacking, which can have legal consequences. Therefore, we do not endorse or encourage using a mod apk private server for Raid Shadow Legends. We only provide this information for educational purposes only. Use it at your own risk and discretion.

      -

      Can I play with other players on a mod apk private server for Raid Shadow Legends?

      -

      Yes, you can play with other players on a mod apk private server for Raid Shadow Legends. However, you can only play with other players who are also using the same mod apk private server as you. You cannot play with players who are using the official version of the game or a different mod apk private server. This means that you may have fewer players to interact with or compete with on a mod apk private server than on the official version of the game.

      -

      Can I switch between the official version of the game and the mod apk private server?

      -

      Yes, you can switch between the official version of the game and the mod apk private server. However, you need to be careful when doing so, as there are some things to consider:

      -
        -
• You need to uninstall the official version of the game before installing the mod apk file, or vice versa. You cannot have both versions installed on your device at the same time.
• You need to back up your data before switching between versions, as they may not be compatible or transferable. You may lose your progress or data if you switch without backing up.
• You need to use different accounts for each version of the game, as they may not be linked or synced. You may get banned or penalized if you use the same account for both versions.
      -

To switch between versions, follow the same steps as for downloading and installing a mod apk file, but with the other version of the game.

      -

      Where can I find more information about Raid Shadow Legends?

      -

      If you want to find more information about Raid Shadow Legends, you can visit the official website of the game, where you can find the latest news, updates, guides, tips, and tricks. You can also join the official social media pages and forums of the game, where you can interact with other players, developers, and moderators. You can also watch videos and streams of the game on YouTube, Twitch, or other platforms, where you can learn from other players or influencers. You can also check out some blogs or reviews of the game, where you can get some insights or opinions from experts or critics.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/tokenizer/sentencepiece/shuffle_corpus.py b/spaces/skf15963/summary/fengshen/tokenizer/sentencepiece/shuffle_corpus.py deleted file mode 100644 index 9b3bdf1fc55f3bdd78ca5d540f80d5b612188b68..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/tokenizer/sentencepiece/shuffle_corpus.py +++ /dev/null @@ -1,18 +0,0 @@ -import sys -import os -from tqdm import tqdm -sys.path.append('../../') - -if __name__ == '__main__': - from data.fs_datasets import load_dataset - dataset = load_dataset('wudao_180g', num_proc=100) - print('dataset loaded', flush=True) - - shuffle_ds = dataset['train'].shuffle(seed=42, writer_batch_size=1000) - print('dataset shuffled', flush=True) - need_size = len(shuffle_ds) - - f = open('shuffle_corpus_{}.txt'.format(need_size), 'w', encoding='utf-8') - for i in tqdm(range(0, need_size)): - f.write(shuffle_ds[i]['text'] + os.linesep) - f.close() diff --git a/spaces/slush0/petals-playground/prompt.py b/spaces/slush0/petals-playground/prompt.py deleted file mode 100644 index ca3d0302ac58906688660286090d4af459f35230..0000000000000000000000000000000000000000 --- a/spaces/slush0/petals-playground/prompt.py +++ /dev/null @@ -1,279 +0,0 @@ -import time -from datetime import datetime -import gradio as gr - -import chat_client - -CHAT_URL = "wss://chat.petals.dev/api/v2/generate" -#CHAT_URL='ws://localhost:8000/api/v2/generate' - - -def generate(state, *args): - # Save that we're in generating loop - state["generate"] = True - - try: - yield from _generate(state, *args) - finally: - state["generate"] = False - - -def _generate( - state, - prompt, - model, - endseq, - max_length, - do_sample, - top_k, - top_p, - temperature, - add_stoptoken, - copy_output, -): - - start = time.time() - cnt = 0 - - def stats(): - # Produces inline stats for generation speed - # sec/t or t/sec depending on the speed - if cnt == 0: - return "\u2026 | ? sec/t" - if cnt > time.time() - start: - items_per_sec = cnt / (time.time() - start) - return f" | {items_per_sec:.1f} t/sec" - sec_per_item = (time.time() - start) / cnt - return f" | {sec_per_item:.1f} sec/t" - - try: - client = chat_client.ModelClient(CHAT_URL) - client.open_session(model, max_length) - except Exception as e: - print(datetime.now(), str(e)[-500:]) - raise gr.Error(str(e)[-500:]) - - if add_stoptoken: - prompt += "" if "bloomz" in model else "\n\n" - - # Translate checkbox items to actual sequences - seq = [] - for s in endseq: - if s == "\\n": - seq.append("\n") - elif s == "": - seq.append("") - elif s == "? (question mark)": - seq.append("?") - elif s == ". (dot)": - seq.append(".") - - # only top_k or top_p can be set - if top_k == 0: - top_k = None - if top_p == 0: - top_p = None - if top_p and top_k: - top_k = None - - if not temperature: - temperature = 1.0 - - prompt2 = prompt - output = "" - - # This render prompt dialog immediately and - # don't wait to generator to return first result - yield [state, prompt2, stats()] - - try: - for out in client.generate( - prompt, - max_new_tokens=1, - do_sample=do_sample, - temperature=temperature, - top_k=top_k, - top_p=top_p, - stop_sequences=seq, - ): - - if not state["generate"]: - client.close_session() - return - - cnt += 1 - output += out - - if copy_output: - prompt2 += out - - yield state, prompt2, output + stats() - - # Avoid throwing exception by generate() - # to prevent UI errors. 
- if cnt >= max_length - 6: # FIXME bulgarian constant - break - - # Prints final result w/o statistics - yield state, prompt2, output - except Exception as e: - print(datetime.now(), str(e)[-500:]) - raise gr.Error(str(e)[-500:]) - - -def stop(state): - """Stops generating.""" - state.update({"generate": False}) - return state - - -# --------------------------------------------------------- -# Defining Gradio layout -with gr.Blocks() as iface_prompt: - gr.Markdown( - """**Useful for testing raw prompts with zero, - one or few-shot prompting.**""" - ) - - with gr.Row(): - model = gr.Radio( - ["stabilityai/StableBeluga2", "meta-llama/Llama-2-70b-chat-hf", "bigscience/bloomz", "bigscience/bloom"], value="stabilityai/StableBeluga2", label="Use model" - ) - - # Additional ending sequence, at which generation shoud stop - endseq = gr.CheckboxGroup( - ["\\n", "", "? (question mark)", ". (dot)"], - value=[""], - label="Extra end sequences", - ) - - # Maximum length of inference session - max_length = gr.Radio( - [64, 128, 256, 512, 1024, 2048], - value=512, - interactive=True, - label="Max length", - ) - - with gr.Row(): - with gr.Column(): - # Switch between sampling and greedy generation - do_sample = gr.Checkbox(value=True, interactive=True, label="do_sample") - - # Should the app append stop sequence at the end of prompt - # or should it leave the prompt open? - add_stoptoken = gr.Checkbox( - value=True, - interactive=True, - label="Automatically add eos token to the prompt.", - ) - - # Only one of top_k and top_p can be set. Requires "do_sample=True" to work. - top_k = gr.Number(value=0, precision=0, interactive=True, label="top_k") - top_p = gr.Number(value=0.9, precision=2, interactive=True, label="top_p") - # TODO num_beams - - # Generation temperature - temperature = gr.Number( - value=0.75, precision=2, interactive=True, label="Temperature" - ) - - prompt = gr.Textbox(lines=3, label="Prompt", placeholder="Prompt Here...") - state = gr.State({"generate": False}) - - with gr.Row(): - button_generate = gr.Button("Generate") - button_stop = gr.Button("Stop") - - # Automatically copy the output at the end of prompt - copy_output = gr.Checkbox(label="Output -> Prompt") - - output = gr.Textbox(lines=3, label="Output") - - # Define button actions - button_generate.click( - generate, - inputs=[ - state, - prompt, - model, - endseq, - max_length, - do_sample, - top_k, - top_p, - temperature, - add_stoptoken, - copy_output, - ], - outputs=[state, prompt, output], - ) - button_stop.click(stop, inputs=[state], outputs=[state]) - - examples = gr.Examples( - inputs=[prompt, model, do_sample, top_k, top_p, temperature, add_stoptoken], - examples=[ - [ - "The SQL command to extract all the users whose name starts with A is: ", - "stabilityai/StableBeluga2", - False, - 0, - 0, - 1, - False, - ], - [ - "// Returns every other value in the list as a new list.\n" - "def every_other(l):\n", - "stabilityai/StableBeluga2", - False, - 0, - 0, - 1, - False, - ], - [ - "The Spanish translation of thank you for your help is: ", - "stabilityai/StableBeluga2", - False, - 0, - 0, - 1, - False, - ], - [ - "A human talks to a powerful AI that follows the Human's instructions.\n" - "AI is talkative, friendly, positive and provides detailed answers to any question.\n" - "Human: Hi!\n" - "AI: Hi! 
How can I help you?\n" - "Human: What's the capital of Portugal?\n" - "AI: ", - "stabilityai/StableBeluga2", - True, - 0, - 0.9, - 0.75, - False, - ], - [ - "Here is a very polite and formal e-mail writing to staff that they are fired because of late delivery of the project and higher costs:\n" - "Dear staff,\n" - "it is with utmost ", - "stabilityai/StableBeluga2", - True, - 0, - 0.9, - 0.75, - False, - ], - [ - "Lorem ipsum dolor sit amet, ", - "stabilityai/StableBeluga2", - True, - 0, - 0.9, - 0.75, - False, - ], - ], - ) diff --git a/spaces/spexight/no.2/README.md b/spaces/spexight/no.2/README.md deleted file mode 100644 index cf56c00ed9dd4e02d1f724afd54f1caec791a993..0000000000000000000000000000000000000000 --- a/spaces/spexight/no.2/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: No.2 -emoji: 📉 -colorFrom: blue -colorTo: yellow -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_f0.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_f0.py deleted file mode 100644 index df721d683113b44957149cfc3cddaba36520a22c..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/evaluation/eval_f0.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Signal processing-based evaluation using waveforms -""" -import numpy as np -import os.path as op - -import torchaudio -import tqdm -from tabulate import tabulate - -from examples.speech_synthesis.utils import ( - gross_pitch_error, voicing_decision_error, f0_frame_error -) -from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec - - -def difference_function(x, n, tau_max): - """ - Compute difference function of data x. This solution is implemented directly - with Numpy fft. - - - :param x: audio data - :param n: length of data - :param tau_max: integration window size - :return: difference function - :rtype: list - """ - - x = np.array(x, np.float64) - w = x.size - tau_max = min(tau_max, w) - x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum())) - size = w + tau_max - p2 = (size // 32).bit_length() - nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32) - size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size) - fc = np.fft.rfft(x, size_pad) - conv = np.fft.irfft(fc * fc.conjugate())[:tau_max] - return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - \ - 2 * conv - - -def cumulative_mean_normalized_difference_function(df, n): - """ - Compute cumulative mean normalized difference function (CMND). - - :param df: Difference function - :param n: length of data - :return: cumulative mean normalized difference function - :rtype: list - """ - - # scipy method - cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float) - return np.insert(cmn_df, 0, 1) - - -def get_pitch(cmdf, tau_min, tau_max, harmo_th=0.1): - """ - Return fundamental period of a frame based on CMND function. 
- - :param cmdf: Cumulative Mean Normalized Difference function - :param tau_min: minimum period for speech - :param tau_max: maximum period for speech - :param harmo_th: harmonicity threshold to determine if it is necessary to - compute pitch frequency - :return: fundamental period if there is values under threshold, 0 otherwise - :rtype: float - """ - tau = tau_min - while tau < tau_max: - if cmdf[tau] < harmo_th: - while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]: - tau += 1 - return tau - tau += 1 - - return 0 # if unvoiced - - -def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500, - harmo_thresh=0.1): - """ - - Compute the Yin Algorithm. Return fundamental frequency and harmonic rate. - - https://github.com/NVIDIA/mellotron adaption of - https://github.com/patriceguyot/Yin - - :param sig: Audio signal (list of float) - :param sr: sampling rate (int) - :param w_len: size of the analysis window (samples) - :param w_step: size of the lag between two consecutives windows (samples) - :param f0_min: Minimum fundamental frequency that can be detected (hertz) - :param f0_max: Maximum fundamental frequency that can be detected (hertz) - :param harmo_thresh: Threshold of detection. The yalgorithmù return the - first minimum of the CMND function below this threshold. - - :returns: - - * pitches: list of fundamental frequencies, - * harmonic_rates: list of harmonic rate values for each fundamental - frequency value (= confidence value) - * argmins: minimums of the Cumulative Mean Normalized DifferenceFunction - * times: list of time of each estimation - :rtype: tuple - """ - - tau_min = int(sr / f0_max) - tau_max = int(sr / f0_min) - - # time values for each analysis window - time_scale = range(0, len(sig) - w_len, w_step) - times = [t/float(sr) for t in time_scale] - frames = [sig[t:t + w_len] for t in time_scale] - - pitches = [0.0] * len(time_scale) - harmonic_rates = [0.0] * len(time_scale) - argmins = [0.0] * len(time_scale) - - for i, frame in enumerate(frames): - # Compute YIN - df = difference_function(frame, w_len, tau_max) - cm_df = cumulative_mean_normalized_difference_function(df, tau_max) - p = get_pitch(cm_df, tau_min, tau_max, harmo_thresh) - - # Get results - if np.argmin(cm_df) > tau_min: - argmins[i] = float(sr / np.argmin(cm_df)) - if p != 0: # A pitch was found - pitches[i] = float(sr / p) - harmonic_rates[i] = cm_df[p] - else: # No pitch, but we compute a value of the harmonic rate - harmonic_rates[i] = min(cm_df) - - return pitches, harmonic_rates, argmins, times - - -def extract_f0(samples): - f0_samples = [] - for sample in tqdm.tqdm(samples): - if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]): - f0_samples.append(None) - continue - - # assume single channel - yref, sr = torchaudio.load(sample["ref"]) - ysyn, _sr = torchaudio.load(sample["syn"]) - yref, ysyn = yref[0], ysyn[0] - assert sr == _sr, f"{sr} != {_sr}" - - yref_f0 = compute_yin(yref, sr) - ysyn_f0 = compute_yin(ysyn, sr) - - f0_samples += [ - { - "ref": yref_f0, - "syn": ysyn_f0 - } - ] - - return f0_samples - - -def eval_f0_error(samples, distortion_fn): - results = [] - for sample in tqdm.tqdm(samples): - if sample is None: - results.append(None) - continue - # assume single channel - yref_f, _, _, yref_t = sample["ref"] - ysyn_f, _, _, ysyn_t = sample["syn"] - - yref_f = np.array(yref_f) - yref_t = np.array(yref_t) - ysyn_f = np.array(ysyn_f) - ysyn_t = np.array(ysyn_t) - - distortion = distortion_fn(yref_t, yref_f, ysyn_t, ysyn_f) - results.append((distortion.item(), - 
len(yref_f), - len(ysyn_f) - )) - return results - - -def eval_gross_pitch_error(samples): - return eval_f0_error(samples, gross_pitch_error) - - -def eval_voicing_decision_error(samples): - return eval_f0_error(samples, voicing_decision_error) - - -def eval_f0_frame_error(samples): - return eval_f0_error(samples, f0_frame_error) - - -def print_results(results, show_bin): - results = np.array(list(filter(lambda x: x is not None, results))) - - np.set_printoptions(precision=3) - - def _print_result(results): - res = { - "nutt": len(results), - "error": results[:, 0].mean(), - "std": results[:, 0].std(), - "dur_ref": int(results[:, 1].sum()), - "dur_syn": int(results[:, 2].sum()), - } - print(tabulate([res.values()], res.keys(), floatfmt=".4f")) - - print(">>>> ALL") - _print_result(results) - - if show_bin: - edges = [0, 200, 400, 600, 800, 1000, 2000, 4000] - for i in range(1, len(edges)): - mask = np.logical_and(results[:, 1] >= edges[i-1], - results[:, 1] < edges[i]) - if not mask.any(): - continue - bin_results = results[mask] - print(f">>>> ({edges[i-1]}, {edges[i]})") - _print_result(bin_results) - - -def main(eval_f0, gpe, vde, ffe, show_bin): - samples = load_eval_spec(eval_f0) - if gpe or vde or ffe: - f0_samples = extract_f0(samples) - - if gpe: - print("===== Evaluate Gross Pitch Error =====") - results = eval_gross_pitch_error(f0_samples) - print_results(results, show_bin) - if vde: - print("===== Evaluate Voicing Decision Error =====") - results = eval_voicing_decision_error(f0_samples) - print_results(results, show_bin) - if ffe: - print("===== Evaluate F0 Frame Error =====") - results = eval_f0_frame_error(f0_samples) - print_results(results, show_bin) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("eval_f0") - parser.add_argument("--gpe", action="store_true") - parser.add_argument("--vde", action="store_true") - parser.add_argument("--ffe", action="store_true") - parser.add_argument("--show-bin", action="store_true") - args = parser.parse_args() - - main(args.eval_f0, args.gpe, args.vde, args.ffe, args.show_bin) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md deleted file mode 100644 index 314984fcbb6825169193b21bd6bb3fca5fd2503b..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# Self-Training with Kaldi HMM Models -This folder contains recipes for self-training on pseudo phone transcripts and -decoding into phones or words with [kaldi](https://github.com/kaldi-asr/kaldi). - -To start, download and install kaldi follow its instruction, and place this -folder in `path/to/kaldi/egs`. - -## Training -Assuming the following has been prepared: -- `w2v_dir`: contains features `{train,valid}.{npy,lengths}`, real transcripts `{train,valid}.${label}`, and dict `dict.${label}.txt` -- `lab_dir`: contains pseudo labels `{train,valid}.txt` -- `arpa_lm`: Arpa-format n-gram phone LM for decoding -- `arpa_lm_bin`: Arpa-format n-gram phone LM for unsupervised model selection to be used with KenLM - -Set these variables in `train.sh`, as well as `out_dir`, the output directory, -and then run it. - -The output will be: -``` -==== WER w.r.t. 
real transcript (select based on unsupervised metric) -INFO:root:./out/exp/mono/decode_valid/scoring/14.0.0.tra.txt: score 0.9178 wer 28.71% lm_ppl 24.4500 gt_wer 25.57% -INFO:root:./out/exp/tri1/decode_valid/scoring/17.1.0.tra.txt: score 0.9257 wer 26.99% lm_ppl 30.8494 gt_wer 21.90% -INFO:root:./out/exp/tri2b/decode_valid/scoring/8.0.0.tra.txt: score 0.7506 wer 23.15% lm_ppl 25.5944 gt_wer 15.78% -``` -where `wer` is the word eror rate with respect to the pseudo label, `gt_wer` to -the ground truth label, `lm_ppl` the language model perplexity of HMM prediced -transcripts, and `score` is the unsupervised metric for model selection. We -choose the model and the LM parameter of the one with the lowest score. In the -example above, it is `tri2b`, `8.0.0`. - - -## Decoding into Phones -In `decode_phone.sh`, set `out_dir` the same as used in `train.sh`, set -`dec_exp` and `dec_lmparam` to the selected model and LM parameter (e.g. -`tri2b` and `8.0.0` in the above example). `dec_script` needs to be set -according to `dec_exp`: for mono/tri1/tri2b, use `decode.sh`; for tri3b, use -`decode_fmllr.sh`. - -The output will be saved at `out_dir/dec_data` - - -## Decoding into Words -`decode_word_step1.sh` prepares WFSTs for word decoding. Besides the variables -mentioned above, set -- `wrd_arpa_lm`: Arpa-format n-gram word LM for decoding -- `wrd_arpa_lm_bin`: Arpa-format n-gram word LM for unsupervised model selection - -`decode_word_step1.sh` decodes the `train` and `valid` split into word and runs -unsupervised model selection using the `valid` split. The output is like: -``` -INFO:root:./out/exp/tri2b/decodeword_valid/scoring/17.0.0.tra.txt: score 1.8693 wer 24.97% lm_ppl 1785.5333 gt_wer 31.45% -``` - -After determining the LM parameter (`17.0.0` in the example above), set it in -`decode_word_step2.sh` and run it. The output will be saved at -`out_dir/dec_data_word`. diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/unfold.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/unfold.py deleted file mode 100644 index 138272f1ef4f673b29e36aed4531106f7ce95968..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/unfold.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch.nn.functional as F - - -def unfold1d(x, kernel_size, padding_l, pad_value=0): - """unfold T x B x C to T x B x C x K""" - if kernel_size > 1: - T, B, C = x.size() - x = F.pad( - x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value - ) - x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) - else: - x = x.unsqueeze(3) - return x diff --git a/spaces/starlit7/KorPoliticsTTS/app.py b/spaces/starlit7/KorPoliticsTTS/app.py deleted file mode 100644 index 3ed0ce12d4530aa77ac900cf3d5cdc7a288d5275..0000000000000000000000000000000000000000 --- a/spaces/starlit7/KorPoliticsTTS/app.py +++ /dev/null @@ -1,172 +0,0 @@ -import json -import os -import re - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def get_text(text, hps, is_phoneme): - text_norm = text_to_sequence(text, hps.symbols, [] if is_phoneme else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_phoneme): - if limitation: - text_len = len(text) - max_len = 300 - if is_phoneme: - max_len *= 3 - else: - if len(hps.data.text_cleaners) > 0 and hps.data.text_cleaners[0] == "zh_ja_mixture_cleaners": - text_len = len(re.sub("(\[ZH\]|\[JA\])", "", text)) - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_phoneme) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - - - - -def create_to_phoneme_fn(hps): - def to_phoneme_fn(text): - return _clean_text(text, hps.data.text_cleaners) if text != "" else "" - - return to_phoneme_fn - - -css = """ - #advanced-btn { - color: white; - border-color: black; - background: black; - font-size: .7rem !important; - line-height: 19px; - margin-top: 24px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } -""" - -if __name__ == '__main__': - models_tts = [] - models_vc = [] - models_soft_vc = [] - name = 'KorPoliticsTTS' - lang = '한국어 (Korean)' - example = '존경하는 국민 여러분' - config_path = f"saved_model/config.json" - model_path = f"saved_model/model.pth" - cover_path = f"saved_model/cover.png" - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - - t = 'vits' - models_tts.append((name, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, 
hps, speaker_ids), - create_to_phoneme_fn(hps))) - - - app = gr.Blocks(css=css) - - with app: - gr.Markdown("# KorPoliticsTTS Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ORI-Muchim.PoliticsTTS)\n\n" - "[KorPoliticsTTS 제작자 유튜브 주소]" - "(https://www.youtube.com/@litlit/featured)" - ) - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, cover_path, speakers, lang, example, symbols, tts_fn, - to_phoneme_fn) in enumerate(models_tts): - with gr.TabItem(f"Politician"): - with gr.Column(): - gr.Markdown(f"## {name}\n\n" - f"![cover](file/{cover_path})\n\n" - f"lang: {lang}") - tts_input1 = gr.TextArea(label="Text (300 words limitation)", value=example, - elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - phoneme_input = gr.Checkbox(value=False, label="Phoneme input") - to_phoneme_btn = gr.Button("Covert text to phoneme") - phoneme_list = gr.Dataset(label="Phoneme list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"phoneme-list{i}") - phoneme_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio") - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, phoneme_input], - [tts_output1, tts_output2]) - to_phoneme_btn.click(to_phoneme_fn, [tts_input1], [tts_input1]) - phoneme_list.click(None, [phoneme_list, phoneme_list_json], [], - _js=f""" - (i,phonemes) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + phonemes[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + phonemes[i].length; - text_input.selectionEnd = startPos + phonemes[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - gr.Markdown( - "Reference \n\n" - "- [https://huggingface.co/spaces/skytnt/moe-tts](https://huggingface.co/spaces/skytnt/moe-tts)" - ) - - app.queue(concurrency_count=3).launch(show_api=False) diff --git a/spaces/stomexserde/gpt4-ui/Examples/A.Soldiers.Story.1984.DVDRip.x264.DD4.0 OP.md b/spaces/stomexserde/gpt4-ui/Examples/A.Soldiers.Story.1984.DVDRip.x264.DD4.0 OP.md deleted file mode 100644 index 0f3e3384ee15ca42a981e1015df5ea2b935b9050..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/A.Soldiers.Story.1984.DVDRip.x264.DD4.0 OP.md +++ /dev/null @@ -1,20 +0,0 @@ -
      -

      A Soldier's Story (1984) - A Classic Film About Racism and Murder

      -

If you are looking for a gripping drama that explores the themes of racism, justice and loyalty in the US Army during World War II, you should watch A Soldier's Story (1984). The film is based on the Pulitzer Prize-winning play by Charles Fuller and was directed by Norman Jewison. It stars Howard E. Rollins Jr. as Captain Davenport, a black officer who is sent to investigate the murder of a black sergeant at a segregated Army base in Louisiana.

      -

      A.Soldier's.Story.1984.DVDRip.x264.DD4.0 OP


Download: https://urlgoal.com/2uI9xE



      -

      The film is a powerful and realistic portrayal of the racial tensions and prejudices that existed in the military and society at that time. It also features an outstanding cast of actors, including Adolph Caesar as the victim Sergeant Waters, Denzel Washington as Private Peterson, one of the suspects, and Robert Townsend as Corporal Ellis, a witness. The film was nominated for three Academy Awards, including Best Picture, Best Supporting Actor (Caesar) and Best Adapted Screenplay (Fuller).

      -

      A Soldier's Story (1984) is available in high-quality DVD rip format with x264 video codec and DD4.0 audio codec. You can download it from the link below with the keyword "A.Soldier's.Story.1984.DVDRip.x264.DD4.0 OP". This is a rare and valuable copy of the film that you don't want to miss.

      -Download A Soldier's Story (1984) here - -

      A Soldier's Story (1984) is not only a compelling mystery, but also a profound exploration of the human condition. The film shows how the characters are shaped by their experiences, beliefs and identities, and how they cope with the challenges and conflicts they face. The film also raises important questions about the meaning of justice, the role of authority and the value of loyalty.

      -

      -

      The film has been praised by critics and audiences alike for its excellent direction, script, acting and cinematography. It is widely regarded as one of the best films of the 1980s and one of the finest examples of African American cinema. It has also been selected for preservation in the United States National Film Registry by the Library of Congress as being "culturally, historically or aesthetically significant".

      -

      If you want to watch a classic film that will make you think, feel and learn, you should download A Soldier's Story (1984) from the link below with the keyword "A.Soldier's.Story.1984.DVDRip.x264.DD4.0 OP". You will not regret it.

      -Download A Soldier's Story (1984) here - -

      A Soldier's Story (1984) is also a remarkable film for its historical and cultural significance. It was one of the first films to depict the experiences of black soldiers in World War II, and to address the issues of racism and discrimination within the US Army. It also showcased the talents of many African American actors who would later become famous, such as Denzel Washington, Robert Townsend and David Alan Grier.

      -

      The film was also a breakthrough for its director Norman Jewison, who had previously made films such as In the Heat of the Night (1967) and Fiddler on the Roof (1971). Jewison was able to adapt the play by Charles Fuller into a cinematic masterpiece that captured the essence of the story and the characters. He also used innovative techniques such as flashbacks, voice-overs and music to create a captivating and suspenseful atmosphere.

      -

      A Soldier's Story (1984) is a film that you should not miss. It is a film that will entertain you, educate you and inspire you. It is a film that will make you appreciate the sacrifices and struggles of those who fought for freedom and equality. It is a film that will make you proud to be an American.

      -Download A Soldier's Story (1984) here

      81aa517590
      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Autodesk Autocad 2012 Serial Number Product Key.md b/spaces/stomexserde/gpt4-ui/Examples/Autodesk Autocad 2012 Serial Number Product Key.md deleted file mode 100644 index 3a0cc8a003529589233f475b4b734318c7ddbae4..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Autodesk Autocad 2012 Serial Number Product Key.md +++ /dev/null @@ -1,32 +0,0 @@ -
-

      How to Find and Activate Autodesk Autocad 2012 Serial Number Product Key

      -

      If you have purchased Autodesk Autocad 2012, you will need a serial number and a product key to activate your software. A serial number is a unique code that identifies your license, while a product key is a code that specifies the product version and features that you are entitled to use. In this article, we will show you how to find and activate your Autodesk Autocad 2012 serial number product key.

      -

      How to Find Your Autodesk Autocad 2012 Serial Number Product Key

      -

      There are different ways to find your Autodesk Autocad 2012 serial number product key, depending on how you obtained your software.

      -

      Autodesk Autocad 2012 Serial Number Product Key


      DOWNLOAD === https://urlgoal.com/2uI9Sz



      -
        -
• If you purchased your software from the Autodesk online store, you can find your serial number and product key in the order confirmation email that you received from Autodesk.
• If you purchased your software from a reseller, you can find your serial number and product key on the product packaging or on the Certificate of Authenticity (COA) attached to your computer.
• If you downloaded your software from the Autodesk Education Community, you can find your serial number and product key on the download page or in the email that you received from Autodesk.
• If you are a subscription customer, you can find your serial number and product key in your Autodesk Account. To access your account, go to https://manage.autodesk.com and sign in with your email and password. Then, click on Products & Services and select your product. You will see your serial number and product key under License Details.
      -

      The format of the serial number and product key is as follows:

      -

      Serial Number: XXX-XXXXXXXX
      Product Key: XXXXX

      -

      For example, for Autodesk Autocad 2012, the serial number could be 666-69696969 and the product key could be 001D1[^4^] [^5^].

      -

      How to Activate Your Autodesk Autocad 2012 Serial Number Product Key

      -

      Once you have your serial number and product key, you can activate your software by following these steps:

      -

      -
        -
1. Launch Autodesk Autocad 2012 on your computer.
2. On the Let's Get Started screen, click Enter a Serial Number.
3. Type or paste your serial number and product key in the corresponding fields. Make sure to enter them exactly as they appear.
4. Click Next.
5. Select one of the following activation methods: Online Activation or Offline Activation.
6. If you choose Online Activation, follow the instructions on the screen to complete the activation process. You will need an internet connection and an Autodesk ID to activate online.
7. If you choose Offline Activation, follow the instructions on the screen to generate a request code. Then, go to https://register.autodesk.com on another computer that has internet access and sign in with your Autodesk ID. Enter your request code and click Generate Activation Code. Copy or write down the activation code and return to your computer. Enter the activation code in the corresponding field and click Next.
      -

Congratulations! You have successfully activated Autodesk Autocad 2012 with your serial number and product key. You can now enjoy using your software with full features and functionality.

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Chhota Bheem And The Throne Of Bali Tamil Movie REPACK Download 720p Hd.md b/spaces/stomexserde/gpt4-ui/Examples/Chhota Bheem And The Throne Of Bali Tamil Movie REPACK Download 720p Hd.md deleted file mode 100644 index a6ae24b14de3f9e6c7ec523fa39ba47320f90e0b..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Chhota Bheem And The Throne Of Bali Tamil Movie REPACK Download 720p Hd.md +++ /dev/null @@ -1,43 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "Chhota Bheem And The Throne Of Bali Tamil Movie Download 720p Hd": - -

      How to Download Chhota Bheem And The Throne Of Bali Tamil Movie in 720p HD Quality

      -

      Chhota Bheem And The Throne Of Bali is a 2013 animated adventure film based on the popular Indian cartoon series Chhota Bheem. The film follows the adventures of Chhota Bheem and his friends as they travel to the island of Bali to save the king and the princess from the evil Rangda.

      -

      Chhota Bheem And The Throne Of Bali Tamil Movie Download 720p Hd


      DOWNLOAD === https://urlgoal.com/2uIbKN



      -

      If you are a fan of Chhota Bheem and want to watch this movie in Tamil language with high-definition quality, you might be wondering how to download it online. Well, you are in luck because we have some tips for you to find and download Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality.

      -

      Tip 1: Use a Reliable Torrent Site

      -

      One of the easiest ways to download Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality is to use a reliable torrent site that has a good reputation and a large number of seeders. Torrent sites allow you to download files from other users who have already downloaded them. You will need a torrent client software such as BitTorrent or uTorrent to download the files.

      -

      Some of the popular torrent sites that might have Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality are:

      -
        -
      • The Pirate Bay
      • -
      • 1337x
      • -
      • RARBG
      • -
      • LimeTorrents
      • -
      • Torrentz2
      • -
      -

      However, be careful when using torrent sites as they might contain malware or viruses that can harm your device. Also, make sure you use a VPN service to protect your privacy and avoid any legal issues.

      -

      Tip 2: Use a Streaming Site

      -

      Another way to watch Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality is to use a streaming site that offers online video content. Streaming sites allow you to watch movies and shows without downloading them. You will need a good internet connection and a web browser to access the streaming sites.

      -

      -

      Some of the popular streaming sites that might have Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality are:

      -
        -
      • Hotstar
      • -
      • Zee5
      • -
      • SonyLIV
      • -
      • Voot
      • -
      • MX Player
      • -
      -

      However, be aware that some streaming sites might require you to sign up or pay a subscription fee to access their content. Also, some streaming sites might have geo-restrictions or censorship that can limit your access to certain movies and shows.

      -

      Tip 3: Use a Download Manager

      -

      A third option to download Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality is to use a download manager software that can help you download files from various sources. Download managers can speed up your downloads, resume interrupted downloads, and manage multiple downloads at once.

      -

      Some of the popular download managers that can help you download Chhota Bheem And The Throne Of Bali Tamil movie in 720p HD quality are:

      -
        -
      • Internet Download Manager
      • -
      • Free Download Manager
      • -
      • JDownloader
      • -
      • EagleGet
      • -
      • Xtreme Download Manager
      • -
      -

      However, make sure you use a trusted and secure download manager software that does not contain any malware or spyware. Also, check the source of the file before downloading it to avoid any fake or corrupted files.

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Free Download Xforce Keygen Autocad 2012 64 Bit.md b/spaces/stomexserde/gpt4-ui/Examples/Free Download Xforce Keygen Autocad 2012 64 Bit.md deleted file mode 100644 index 247e9b3b42a41adb75513f442dfcd2aec7a6c679..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Free Download Xforce Keygen Autocad 2012 64 Bit.md +++ /dev/null @@ -1,26 +0,0 @@ - -

      How to Activate Autodesk Products with Xforce Keygen 2012

      -

      If you are looking for a way to activate Autodesk products such as AutoCAD 2012, you may have come across Xforce Keygen 2012, a tool that can generate activation codes for any Autodesk product. In this article, we will show you how to use Xforce Keygen 2012 to activate your Autodesk software in a few simple steps.

      -

      Free Download Xforce Keygen Autocad 2012 64 Bit


      Download File > https://urlgoal.com/2uI9vn



      -

      Xforce Keygen 2012 is a crack tool that can bypass the activation process of Autodesk products and make them fully functional without requiring a license key. This tool works for both 32-bit and 64-bit versions of Windows and supports all Autodesk products from 2009 to 2012. However, using Xforce Keygen 2012 may violate the terms and conditions of Autodesk and may expose your computer to security risks. Therefore, we do not recommend using this tool and advise you to purchase a legitimate license from Autodesk instead.

      -

      If you still want to use Xforce Keygen 2012 at your own risk, here are the steps you need to follow:

      -
        -
      1. Download Xforce Keygen 2012 from one of the links provided in the search results[^1^] [^2^] [^3^] [^4^]. Make sure you download the correct version for your system (32-bit or 64-bit).
      2. -
      3. Disable your internet connection and antivirus software before running the tool.
      4. -
      5. Install the Autodesk product you want to activate on your computer. You can use any serial number and product key from the list provided in the search results[^1^].
      6. -
      7. After the installation is complete, launch the Autodesk product and click on "Activate". If you get an error message saying that your serial number is wrong, just click on "Close" and try again.
      8. -
      9. Select "I have an activation code from Autodesk" and copy the request code from the activation screen.
      10. -
      11. Run Xforce Keygen 2012 as administrator and click on "Patch". You should see a message saying "Successfully patched".
      12. -
      13. Paste the request code into the keygen and click on "Generate". You should get an activation code.
      14. -
      15. Copy the activation code and paste it into the activation screen. Click on "Next". You should see a message saying that your product has been activated.
      16. -
      17. Enjoy your activated Autodesk product!
      18. -
      -

      Note: You may need to restart your computer after the activation process. Also, do not update your Autodesk product or connect to the internet while using it.

      - -

      Xforce Keygen 2012 is one of the most popular tools for activating Autodesk products. It can save you a lot of money and time by allowing you to use the full features of the software without paying for a license. However, there are also some drawbacks and risks associated with using this tool.

      -

      -

      One of the main disadvantages of using Xforce Keygen 2012 is that it may not work for all Autodesk products or versions. Some products may require a different activation method or a newer version of the keygen. Also, some products may have updates or patches that can detect and disable the activation code generated by the keygen. Therefore, you may not be able to use the latest features or fixes of the software.

      -

      Another drawback of using Xforce Keygen 2012 is that it may harm your computer or compromise your security. The tool may contain viruses, malware, or spyware that can infect your system and damage your files or data. The tool may also expose your personal information or activity to hackers or third parties who can use it for malicious purposes. Therefore, you may face legal or ethical issues by using this tool.

      -

      The best way to avoid these problems and enjoy the benefits of Autodesk products is to purchase a genuine license from Autodesk or an authorized reseller. By doing so, you will support the developers and creators of the software and ensure that you get the best quality and performance. You will also get access to technical support, updates, and online resources that can help you learn and improve your skills.

      81aa517590
      -
      -
      \ No newline at end of file diff --git a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/libJPG/jpge.h b/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/libJPG/jpge.h deleted file mode 100644 index a46c805ab80aab491f7f9508b3a008b149866bee..0000000000000000000000000000000000000000 --- a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/libJPG/jpge.h +++ /dev/null @@ -1,172 +0,0 @@ - -// jpge.h - C++ class for JPEG compression. -// Public domain, Rich Geldreich -// Alex Evans: Added RGBA support, linear memory allocator. -#ifndef JPEG_ENCODER_H -#define JPEG_ENCODER_H - -#include - -namespace jpge -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef signed int int32; - typedef unsigned short uint16; - typedef unsigned int uint32; - typedef unsigned int uint; - - // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common. - enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 }; - - // JPEG compression parameters structure. - struct params - { - inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { } - - inline bool check_valid() const - { - if ((m_quality < 1) || (m_quality > 100)) return false; - if ((uint)m_subsampling > (uint)H2V2) return false; - return true; - } - - // Quality: 1-100, higher is better. Typical values are around 50-95. - int m_quality; - - // m_subsampling: - // 0 = Y (grayscale) only - // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU) - // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU) - // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common) - subsampling_t m_subsampling; - - // Disables CbCr discrimination - only intended for testing. - // If true, the Y quantization table is also used for the CbCr channels. - bool m_no_chroma_discrim_flag; - - bool m_two_pass_flag; - }; - - // Writes JPEG image to a file. - // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels. - bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Writes JPEG image to memory buffer. - // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes. - // If return value is true, buf_size will be set to the size of the compressed data. - bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Output stream abstract class - used by the jpeg_encoder class to write to the output stream. - // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts. - class output_stream - { - public: - virtual ~output_stream() { }; - virtual bool put_buf(const void* Pbuf, int64_t len) = 0; - template inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); } - }; - - // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions. - class jpeg_encoder - { - public: - jpeg_encoder(); - ~jpeg_encoder(); - - // Initializes the compressor. - // pStream: The stream object to use for writing compressed data. - // params - Compression parameters structure, defined above. - // width, height - Image dimensions. - // channels - May be 1, or 3. 
1 indicates grayscale, 3 indicates RGB source data. - // Returns false on out of memory or if a stream write fails. - bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params()); - - const params &get_params() const { return m_params; } - - // Deinitializes the compressor, freeing any allocated memory. May be called at any time. - void deinit(); - - uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; } - inline uint get_cur_pass() { return m_pass_num; } - - // Call this method with each source scanline. - // width * src_channels bytes per scanline is expected (RGB or Y format). - // You must call with NULL after all scanlines are processed to finish compression. - // Returns false on out of memory or if a stream write fails. - bool process_scanline(const void* pScanline); - - private: - jpeg_encoder(const jpeg_encoder &); - jpeg_encoder &operator =(const jpeg_encoder &); - - typedef int32 sample_array_t; - - output_stream *m_pStream; - params m_params; - uint8 m_num_components; - uint8 m_comp_h_samp[3], m_comp_v_samp[3]; - int m_image_x, m_image_y, m_image_bpp, m_image_bpl; - int m_image_x_mcu, m_image_y_mcu; - int m_image_bpl_xlt, m_image_bpl_mcu; - int m_mcus_per_row; - int m_mcu_x, m_mcu_y; - uint8 *m_mcu_lines[16]; - uint8 m_mcu_y_ofs; - sample_array_t m_sample_array[64]; - int16 m_coefficient_array[64]; - int32 m_quantization_tables[2][64]; - uint m_huff_codes[4][256]; - uint8 m_huff_code_sizes[4][256]; - uint8 m_huff_bits[4][17]; - uint8 m_huff_val[4][256]; - uint32 m_huff_count[4][256]; - int m_last_dc_val[3]; - enum { JPGE_OUT_BUF_SIZE = 2048 }; - uint8 m_out_buf[JPGE_OUT_BUF_SIZE]; - uint8 *m_pOut_buf; - uint m_out_buf_left; - uint32 m_bit_buffer; - uint m_bits_in; - uint8 m_pass_num; - bool m_all_stream_writes_succeeded; - - void optimize_huffman_table(int table_num, int table_len); - void emit_byte(uint8 i); - void emit_word(uint i); - void emit_marker(int marker); - void emit_jfif_app0(); - void emit_dqt(); - void emit_sof(); - void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag); - void emit_dhts(); - void emit_sos(); - void emit_markers(); - void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val); - void compute_quant_table(int32 *dst, int16 *src); - void adjust_quant_table(int32 *dst, int32 *src); - void first_pass_init(); - bool second_pass_init(); - bool jpg_open(int p_x_res, int p_y_res, int src_channels); - void load_block_8_8_grey(int x); - void load_block_8_8(int x, int y, int c); - void load_block_16_8(int x, int c); - void load_block_16_8_8(int x, int c); - void load_quantized_coefficients(int component_num); - void flush_output_buffer(); - void put_bits(uint bits, uint len); - void code_coefficients_pass_one(int component_num); - void code_coefficients_pass_two(int component_num); - void code_block(int component_num); - void process_mcu_row(); - bool terminate_pass_one(); - bool terminate_pass_two(); - bool process_end_of_image(); - void load_mcu(const void* src); - void clear(); - void init(); - }; - -} // namespace jpge - -#endif // JPEG_ENCODER \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cadence Orcad Allegro 16.6 Hotfix 16 Free Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cadence Orcad Allegro 16.6 Hotfix 16 Free Download.md deleted file mode 100644 index 309815d562febf0bfa63e9ee73276f8550ba9b2c..0000000000000000000000000000000000000000 --- 
a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Cadence Orcad Allegro 16.6 Hotfix 16 Free Download.md +++ /dev/null @@ -1,18 +0,0 @@ -
      -

      Manajemen Proyek dari Konseptual sampai Operasional: A Textbook by Iman Soeharto

      -

      Project management is an important and relevant discipline that can be applied to many kinds of projects, in both the public and private sectors. It helps project practitioners plan, organize, control, and evaluate the processes involved in a project, from the conceptual stage through to operations.

      -

      One recommended textbook for studying project management is Manajemen Proyek dari Konseptual sampai Operasional by Iman Soeharto. The book comes in two volumes: volume 1 covers project concepts, feasibility studies, and project networks, while volume 2 covers preparing the tools, the participants, and the implementation of a project.

      -

      Cadence Orcad Allegro 16.6 hotfix 16 free download


      DOWNLOAD ✸✸✸ https://cinurl.com/2uEXYz



      -

      The book was written by Iman Soeharto, a practitioner and academic with extensive experience in project management. He is a Civil Engineering graduate of ITB and holds a Master of Management from UI. He has also served as President Director of PT. Rekayasa Industri and as Chairman of the Indonesian Society of Project Management Professionals (IAMPI).

      -

      The book is intended for students, lecturers, researchers, consultants, managers, and project practitioners who want to deepen their knowledge and skills in project management. It presents material that is complete, systematic, and easy to follow, accompanied by real case examples from various types of projects in Indonesia.

      -

      It also comes with practice questions, case studies, tables, diagrams, figures, and relevant references, making it a trusted, high-quality reference for learning project management from the conceptual stage through to operations.

      -

      If you are interested in buying the book, you can visit the website of the publisher Erlangga[^1^] [^2^] or the book site Goodreads[^3^]. You can also read reviews from other readers who have read the book on Goodreads[^3^]. Happy reading!

      - -

      In Manajemen Proyek dari Konseptual sampai Operasional, Iman Soeharto explains the many aspects of project management, such as the definition and characteristics of a project, the project life cycle, project management methodologies, the roles and responsibilities of the project manager, analysis of the project environment, identification and formulation of project problems, formulation of project goals and objectives, definition of project scope and constraints, preparation of project budgets and schedules, use of project network analysis techniques, management of human resources, materials, equipment, and project information, control of project quality, cost, time, and risk, as well as performance evaluation and project closure.

      - -

      The book also offers useful tips and tricks for improving the effectiveness and efficiency of project management, such as how to write a compelling project proposal, how to communicate the project plan to stakeholders, how to handle conflict and negotiation within a project, how to use project management software, and how to develop your competence and career as a project manager.

      -

      In addition, the book includes several engaging and up-to-date case studies from various types of projects in Indonesia, such as toll-road construction, information system development, research and development, environmental conservation, social and cultural projects, and entrepreneurship projects. These case studies give a realistic picture of how project management is applied in different situations and conditions.

      -

      -

      By reading this book, you will gain comprehensive, practical knowledge and skills in project management from the conceptual stage through to operations. You will also find inspiration and motivation to become a professional, high-quality project manager. The book is well suited to anyone who wants to learn or teach project management, in both theory and practice.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Meri Pyaari Bindu Torrent.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Meri Pyaari Bindu Torrent.md deleted file mode 100644 index 205039f93a195c0b7b5408bf8f9683522691d97f..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Meri Pyaari Bindu Torrent.md +++ /dev/null @@ -1,95 +0,0 @@ - -

      Download Meri Pyaari Bindu Torrent - A Bollywood Movie with a Twist

      - -

      If you are looking for a romantic comedy with a twist, you might want to download Meri Pyaari Bindu torrent and watch it at your convenience. Meri Pyaari Bindu is a 2017 Bollywood movie starring Parineeti Chopra and Ayushmann Khurrana as Bindu and Abhimanyu, two childhood friends who share a love for music and each other.

      -

      download Meri Pyaari Bindu torrent


      Download ::: https://cinurl.com/2uEYRK



      - -

      The movie follows Abhimanyu, a successful writer who is suffering from writer's block and decides to write a love story based on his relationship with Bindu, his unpredictable, crazy, restless, larger than life, live wire neighbor who he has been in love with since he was a kid. As he listens to an old audio cassette of their favorite playlist, he reminisces about their past and present through the songs in the mixed tape, and tries to capture their essence in his book.

      - -

      However, life has other plans for them and their love story takes unexpected turns that change the ending of his book and his life. He realizes that love is neither time nor place-dependent, and all he needs is the right person next to him and of course the right soundtrack.

      - -

      Why Download Meri Pyaari Bindu Torrent?

      - -

      There are many reasons why you might want to download Meri Pyaari Bindu torrent and watch it online or offline. Here are some of them:

      - -
        -
      • The movie is a refreshing take on the genre of romantic comedy, with a mix of nostalgia, humor, drama, and music.
      • -
      • The movie has a stellar cast of Parineeti Chopra and Ayushmann Khurrana, who have great chemistry and deliver convincing performances as Bindu and Abhimanyu.
      • -
      • The movie has a catchy soundtrack that features old and new songs from various genres, such as Hindi film music, pop, rock, jazz, and classical.
      • -
      • The movie has a beautiful cinematography that captures the essence of Kolkata, the city where most of the story takes place.
      • -
      • The movie has a universal appeal that can resonate with anyone who has ever been in love or experienced heartbreak.
      • -
      - -

      How to Download Meri Pyaari Bindu Torrent Safely?

      - -

      If you are convinced that you want to download Meri Pyaari Bindu torrent and watch this amazing movie, you need to be careful about how you do it. Downloading torrents can be risky if you don't use the right tools and precautions. Here are some tips on how to download Meri Pyaari Bindu torrent safely:

      - -
        -
      1. Use a reliable torrent site that has good reviews and ratings from other users. Avoid sites that have low-quality torrents, malware, or pop-up ads.
      2. -
      3. Use a VPN service that can hide your IP address and encrypt your traffic when downloading torrents. This way, you can avoid being tracked by your ISP or legal authorities who might penalize you for torrenting.
      4. -
      5. Use an antivirus software that can scan your downloaded files for any viruses or malware that might harm your device or compromise your security.
      6. -
      7. Use a media player that can play the downloaded file without any issues or errors. Make sure the file format is compatible with your device and player.
      8. -
      9. Enjoy watching Meri Pyaari Bindu with your friends or family!
      10. -
      - -

      Downloading Meri Pyaari Bindu torrent can be a great way to enjoy this wonderful movie at your own pace and convenience. However, you need to be careful about how you do it and use the right tools and precautions to avoid any risks or troubles. Follow these tips and you will be able to download Meri Pyaari Bindu torrent safely and easily.

      -

      What are the Benefits of Watching Meri Pyaari Bindu?

      - -

      Watching Meri Pyaari Bindu can be a rewarding experience for many reasons. Here are some of the benefits of watching this movie:

      - -
        -
      • You can enjoy a heartwarming and hilarious story that will make you laugh, cry, and dance along with the characters.
      • -
      • You can relate to the realistic and relatable portrayal of love, friendship, and life in general.
      • -
      • You can appreciate the artistic and creative aspects of the movie, such as the direction, screenplay, dialogues, music, and editing.
      • -
      • You can learn more about the culture and history of Kolkata, one of the most vibrant and diverse cities in India.
      • -
      • You can get inspired by the message of the movie, which is to follow your dreams and passions, and to never give up on love.
      • -
      - -
      Where to Download Meri Pyaari Bindu Torrent?
      - -

      If you are ready to download Meri Pyaari Bindu torrent and watch this amazing movie, you might be wondering where to find it. There are many torrent sites that offer Meri Pyaari Bindu torrent, but not all of them are reliable and safe. Here are some of the best torrent sites that you can use to download Meri Pyaari Bindu torrent:

      - -
        -
      1. YTS - This is one of the most popular and trusted torrent sites that offers high-quality movies in small file sizes. You can download Meri Pyaari Bindu torrent in 720p or 1080p resolution with subtitles.
      2. -
      3. 1337x - This is another well-known and reputable torrent site that has a huge collection of movies, TV shows, games, music, and more. You can download Meri Pyaari Bindu torrent in various formats and qualities.
      4. -
      5. RARBG - This is a veteran torrent site that has been around for a long time and has a loyal fan base. You can download Meri Pyaari Bindu torrent in HD quality with good audio and video.
      6. -
      7. LimeTorrents - This is a user-friendly and easy-to-use torrent site that has a clean and simple interface. You can download Meri Pyaari Bindu torrent in fast speed and without any hassle.
      8. -
      9. Torrentz2 - This is a meta-search engine that aggregates results from various torrent sites. You can download Meri Pyaari Bindu torrent from multiple sources and choose the best one for you.
      10. -
      - -

      Downloading Meri Pyaari Bindu torrent can be a great way to enjoy this wonderful movie at your own pace and convenience. However, you need to be careful about how you do it and use the right tools and precautions to avoid any risks or troubles. Follow these tips and you will be able to download Meri Pyaari Bindu torrent safely and easily.

      -

      -
      What are the Reviews of Meri Pyaari Bindu?
      - -

      Meri Pyaari Bindu has received mixed reviews from critics and audiences alike. Some have praised the movie for its fresh and quirky take on the romantic comedy genre, while others have criticized it for its uneven pace and lack of depth. Here are some of the reviews of Meri Pyaari Bindu:

      - -
      -

      "Meri Pyaari Bindu is a delightful film that will make you smile, laugh and cry. Parineeti Chopra and Ayushmann Khurrana are a joy to watch as they bring alive their characters with charm and sincerity. The movie is a tribute to the power of music and love, and will leave you with a warm feeling in your heart." - Times of India

      -
      - -
      -

      "Meri Pyaari Bindu is a disappointing film that fails to live up to its potential. Parineeti Chopra and Ayushmann Khurrana are wasted in a poorly written and executed script that tries to be too many things at once. The movie is a mishmash of cliches and stereotypes, and will leave you with a bitter taste in your mouth." - Hindustan Times

      -
      - -
      -

      "Meri Pyaari Bindu is a decent film that has its moments of brilliance and dullness. Parineeti Chopra and Ayushmann Khurrana are good in their roles, but they are let down by a weak and inconsistent plot that lacks focus and direction. The movie is a nostalgic trip down memory lane, but it could have been much more." - Indian Express

      -
      - -

      Meri Pyaari Bindu is a movie that you can either love or hate, depending on your taste and expectations. However, if you are looking for a fun and entertaining movie that will make you feel good, you can download Meri Pyaari Bindu torrent and give it a try.

      -What are the Alternatives to Downloading Meri Pyaari Bindu Torrent? - -

      If you are not comfortable with downloading Meri Pyaari Bindu torrent, or if you cannot find a good torrent site that offers it, you might be wondering what are the alternatives to watching this movie. Here are some of the alternatives to downloading Meri Pyaari Bindu torrent:

      - -
        -
      • Streaming - You can stream Meri Pyaari Bindu online on various platforms, such as Netflix, Amazon Prime Video, Hotstar, Zee5, and more. However, you might need to pay a subscription fee or rent the movie to watch it.
      • -
      • Downloading - You can download Meri Pyaari Bindu legally from various sources, such as iTunes, Google Play, YouTube, and more. However, you might need to pay a fee or buy the movie to download it.
      • -
      • Buying - You can buy Meri Pyaari Bindu on DVD or Blu-ray from various stores, such as Amazon, Flipkart, eBay, and more. However, you might need to wait for the delivery or pay a shipping cost.
      • -
      - -

      Downloading Meri Pyaari Bindu torrent can be a convenient and cost-effective way to watch this movie, but it is not the only way. You can choose from these alternatives depending on your preference and availability.

      - -Conclusion - -

      Meri Pyaari Bindu is a movie that will make you fall in love with its characters, music, and story. It is a movie that will take you on a nostalgic and emotional journey that will touch your heart and soul. If you want to watch this movie at your own pace and convenience, you can download Meri Pyaari Bindu torrent from any of the torrent sites mentioned above. However, you need to be careful about how you do it and use the right tools and precautions to avoid any risks or troubles. Follow these tips and you will be able to download Meri Pyaari Bindu torrent safely and easily.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opel Navi Cd70 Hun.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opel Navi Cd70 Hun.md deleted file mode 100644 index 528bc8d687b198f172f486fb0f11ee76178137b5..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opel Navi Cd70 Hun.md +++ /dev/null @@ -1,136 +0,0 @@ -
      -

      Opel Navi CD70 Hun: The Best Navigation System for Your Opel Car in Hungary

      - -

      If you own an Opel car and you live in Hungary, you might be interested in upgrading your navigation system with the latest map update. The Opel Navi CD70 Hun is a navigation system that is designed specifically for Opel cars and Hungarian roads. It offers many features and benefits that will enhance your driving experience and safety.

      - -

      In this article, we will tell you everything you need to know about the Opel Navi CD70 Hun, such as:

      -

      opel navi cd70 hun


      Downloadhttps://cinurl.com/2uEXE9



      - -
        -
      • What is the Opel Navi CD70 Hun and what are its features?
      • -
      • Why do you need to update your Opel Navi CD70 Hun map?
      • -
      • How to order and install the Opel Navi CD70 Hun map update?
      • -
      • What are the advantages and disadvantages of using the Opel Navi CD70 Hun?
      • -
      - -

      By the end of this article, you will have a clear idea of whether the Opel Navi CD70 Hun is the right navigation system for your Opel car and how to get the most out of it.

      - -

      What is the Opel Navi CD70 Hun and what are its features?

      - -

      The Opel Navi CD70 Hun is a navigation system that is compatible with most Opel cars, such as Adam, Ampera, Antara, Astra, Cascada, Combo, Corsa, Crossland X, Frontera, Grandland X, Insignia, Karl, Meriva, Mokka X, Omega, Signum, Tigra Twintop, Vectra, Vivaro and Zafira.

      - -

      The Opel Navi CD70 Hun uses a CD-ROM to store the map data of Hungary and other European countries. The map data is provided by HERE, a leading provider of digital maps and location services. The map data covers millions of square kilometers and many points of interest in Hungary and Europe.

      - -

      The Opel Navi CD70 Hun has many features that make it a reliable and user-friendly navigation system, such as:

      - -
        -
      • A color display that shows the map and the route guidance in 2D or 3D mode.
      • -
      • A voice guidance that gives you clear and accurate instructions in Hungarian or other languages.
      • -
      • A dynamic route calculation that considers the current traffic situation and suggests the best route to your destination.
      • -
      • A speed limit warning that alerts you when you exceed the legal speed limit on a road.
      • -
      • A TMC (Traffic Message Channel) function that receives real-time traffic information from radio stations and displays it on the map.
      • -
      • A POI (Points of Interest) function that allows you to search for nearby attractions, such as restaurants, hotels, gas stations, parking lots, etc.
      • -
      • A destination memory that stores up to 50 destinations that you can access quickly.
      • -
      • A trip computer that displays useful information about your trip, such as distance traveled, average speed, fuel consumption, etc.
      • -
      - -

      These are some of the features that make the Opel Navi CD70 Hun a smart and convenient navigation system for your Opel car.

      - -

      Why do you need to update your Opel Navi CD70 Hun map?

      - -

      The map data of your Opel Navi CD70 Hun is not static. It changes constantly due to new roads, new sub-divisions, new addresses, new signage, new points of interest, etc. If you do not update your map regularly, you might encounter problems such as:

      - -
        -
      • Getting lost or taking wrong turns due to outdated or missing roads on the map.
      • -
      • Wasting time or fuel due to inefficient or inaccurate routes.
      • -
      • Missing important destinations or attractions due to outdated or missing points of interest on the map.
      • -
      • Getting fined or having accidents due to outdated or missing speed limits or traffic regulations on the map.
      • -
      - -

      To avoid these problems and to ensure that your navigation system operates at peak performance, you need to update your map at least once a year. Updating your map will provide you with essential data including new and modified roads, addresses, signage, points of interest, speed limits and much more.

      -

      - -

      Updating your map will also improve your travel time estimates and generate more accurate routing options. You will be able to drive with confidence and ease in Hungary and Europe with an updated map.

      - -

      How to order and install the Opel Navi CD70 Hun map update?

      - -

      Ordering and installing the Opel Navi CD70 Hun map update is easy and fast. You can do it online or offline depending on your preference. Here are the steps to follow:

      - -
        -
      1. Online: Visit the official website of Opel Navigation Store at https://opel.navigation.com/home/en_GB/OpelEMEA/GBP. Enter your model and year of your Opel car in the menu above to quickly find your map update. See what's new on the product page and proceed to checkout using a secure payment process. You will receive an email confirmation with a link to download the map update file. You will also need a blank CD-R or CD-RW disc to burn the file onto it using a computer.
      2. -
      3. Offline: Visit your nearest Opel dealer or service center and ask for the latest map update for your Opel Navi CD70 Hun. You will receive a CD-ROM with the map update file on it.
      4. -
      5. Once you have the CD-ROM with the map update file on it, insert it into your navigation system's CD slot. The system will automatically detect the update file and prompt you to start the installation process. Follow the on-screen instructions to complete the installation process. The installation process might take up to 30 minutes depending on the size of the update file. Do not turn off your ignition or eject the CD-ROM during the installation process.
      6. -
      7. After the installation process is completed, remove the CD-ROM from your navigation system's CD slot. Your navigation system will restart automatically and load the updated map data. You can now enjoy driving with an updated map on your Opel Navi CD70 Hun.
      8. -
      - -

      These are the steps to order and install the Opel Navi CD70 Hun map update.

      -

      - -

      What are the advantages and disadvantages of using the Opel Navi CD70 Hun?

      - -

      The Opel Navi CD70 Hun is a great navigation system for your Opel car in Hungary. However, like any other product, it has its advantages and disadvantages that you should consider before using it. Here are some of them:

      - -

      Advantages

      - -
        -
      • The Opel Navi CD70 Hun is compatible with most Opel cars and fits perfectly into your dashboard.
      • -
      • The Opel Navi CD70 Hun has many features that make it a reliable and user-friendly navigation system.
      • -
      • The Opel Navi CD70 Hun uses a high-quality map data provided by HERE that covers millions of square kilometers and many points of interest in Hungary and Europe.
      • -
      • The Opel Navi CD70 Hun can be updated easily online or offline with a simple installation process.
      • -
      • The Opel Navi CD70 Hun can help you drive with confidence and ease in Hungary and Europe with an updated map.
      • -
      - -

      Disadvantages

      - -
        -
      • The Opel Navi CD70 Hun uses a CD-ROM to store the map data which might get scratched or damaged over time.
      • -
      • The Opel Navi CD70 Hun has a limited storage capacity which might not be enough for future updates or additional features.
      • -
      • The Opel Navi CD70 Hun might not be compatible with some newer models or features of Opel cars.
      • -
      • The Opel Navi CD70 Hun might not work properly if you use pirated or unofficial CDs or software.
      • -
      • The Opel Navi CD70 Hun might not be available or supported in some countries or regions outside Hungary or Europe.
      • -
      - -

      These are some of the advantages and disadvantages of using the Opel Navi CD70 Hun. You should weigh them carefully before deciding whether to use this navigation system for your Opel car or not.

      - -

      Conclusion

      - -

      The Opel Navi CD70 Hun is a navigation system that is designed specifically for Opel cars and Hungarian roads. It offers many features and benefits that will enhance your driving experience and safety. However, it also has some drawbacks that you should be aware of before using it.

      - -

      If you want to use this navigation system for your Opel car in Hungary or Europe, you should update your map regularly with the latest data from HERE. This will ensure that your navigation system operates at peak performance and provides you with accurate and reliable guidance. You can order and install the map update online or offline with a simple process. You can also visit the Help Center or contact customer support if you need any assistance.

      - -

      We hope that this article has helped you to learn more about the Opel Navi CD70 Hun and how to use it effectively. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      -

      How to use the Opel Navi CD70 Hun effectively?

      - -

      Once you have installed the Opel Navi CD70 Hun map update, you can start using your navigation system to plan and navigate your trips. Here are some tips on how to use the Opel Navi CD70 Hun effectively:

      - -
        -
      • Before you start your trip, enter your destination using the address, POI, or destination memory function. You can also enter intermediate destinations or waypoints if you want to follow a specific route. The system will calculate the best route to your destination and display it on the map.
      • -
      • During your trip, follow the voice and visual guidance provided by the system. The system will also warn you of any speed limits, traffic information, or road conditions that might affect your trip. You can also adjust the volume, brightness, or map view settings according to your preference.
      • -
      • If you need to change your destination or route during your trip, you can use the detour, alternative route, or new destination functions. The system will recalculate the route and guide you accordingly. You can also cancel the guidance at any time if you want to drive without it.
      • -
      • After your trip, you can review your trip data using the trip computer function. You can also delete any destinations or waypoints that you no longer need from the destination memory.
      • -
      - -

      These are some tips on how to use the Opel Navi CD70 Hun effectively. You can also refer to the user manual for more detailed instructions and information.

      - -

      What are some alternatives to the Opel Navi CD70 Hun?

      - -

      The Opel Navi CD70 Hun is not the only navigation system available for Opel cars. There are some alternatives that you might want to consider if you are looking for a different option. Here are some of them:

      - -
        -
      • The Opel Navi DVD90 is a navigation system that uses a DVD-ROM instead of a CD-ROM to store the map data. It has a larger storage capacity and can cover more countries and regions than the CD70. It also has a larger display and more features than the CD70.
      • -
      • The Opel Navi 900 IntelliLink is a navigation system that uses an SD card instead of a CD-ROM or DVD-ROM to store the map data. It has a touchscreen display and can connect to your smartphone via Bluetooth or USB. It also has features such as voice control, online services, and smartphone integration.
      • -
      • The Opel Navi 950 IntelliLink is a navigation system that uses an SD card instead of a CD-ROM or DVD-ROM to store the map data. It has a touchscreen display and can connect to your smartphone via Bluetooth or USB. It also has features such as voice control, online services, smartphone integration, and 3D maps.
      • -
      - -

      These are some alternatives to the Opel Navi CD70 Hun. You can compare their features, prices, and compatibility with your Opel car before making a decision.

      -

      Conclusion

      - -

      The Opel Navi CD70 Hun is a navigation system that is designed specifically for Opel cars and Hungarian roads. It offers many features and benefits that will enhance your driving experience and safety. However, it also has some drawbacks that you should be aware of before using it.

      - -

      If you want to use this navigation system for your Opel car in Hungary or Europe, you should update your map regularly with the latest data from HERE. This will ensure that your navigation system operates at peak performance and provides you with accurate and reliable guidance. You can order and install the map update online or offline with a simple process. You can also visit the Help Center or contact customer support if you need any assistance.

      - -

      We hope that this article has helped you to learn more about the Opel Navi CD70 Hun and how to use it effectively. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Isekai-No-Seikishi-Monogatari-Bd-720p.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Isekai-No-Seikishi-Monogatari-Bd-720p.md deleted file mode 100644 index 7d395ff66f4dda6ba80558caa5013928b6414390..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Isekai-No-Seikishi-Monogatari-Bd-720p.md +++ /dev/null @@ -1,70 +0,0 @@ -## Isekai No Seikishi Monogatari Bd 720p - - - - - - ![Isekai No Seikishi Monogatari Bd 720p](https://sway.office.com/Content/SocialMedia_Logo_Sway_400x400x32.png) - - - - - -**Download File ===> [https://urlca.com/2txt9s](https://urlca.com/2txt9s)** - - - - - - - - - - - - Here is a possible title and article with HTML formatting for the keyword "Isekai No Seikishi Monogatari Bd 720p": - -# Isekai No Seikishi Monogatari: A Fantasy Adventure with Mecha and Romance - - - -Isekai No Seikishi Monogatari (also known as Tenchi Muyo! War on Geminar) is a 13-episode OVA series that was released from 2009 to 2010. It is a spin-off of the popular Tenchi Muyo! franchise, but it can be enjoyed as a standalone story. The series is set in a fantasy world where humans pilot giant robots called Seikishi, which are powered by gems that can only be used by people with special abilities. - - - -The story follows Kenshi Masaki, the half-brother of Tenchi Masaki, who is transported to the world of Geminar by a mysterious woman. There, he becomes involved in a conflict between two factions: the Shtrayu Empire and the Holy Land, which are both seeking to control the Sacred Mechanoids, ancient Seikishi of immense power. Kenshi soon discovers that he has a rare talent for piloting Seikishi, and he attracts the attention of many female characters, including princesses, warriors, and teachers. - - - -Isekai No Seikishi Monogatari is a series that combines elements of fantasy, mecha, comedy, and romance. It has a colorful cast of characters, each with their own personality and backstory. The animation is fluid and detailed, especially during the action scenes. The music is composed by Akifumi Tada, who also worked on the original Tenchi Muyo! series. The opening theme is "Follow Me" by Seira Kagami, and the ending theme is "Destino" by Alchemy+. - - - -If you are looking for a fun and exciting anime series that will take you to another world, you should check out Isekai No Seikishi Monogatari. You can watch it in high quality BD 720p format from various sources online[^1^]. You will not regret it! - -Here are a few more paragraphs with HTML formatting for the article: - -One of the main attractions of Isekai No Seikishi Monogatari is the diverse and charming characters that Kenshi meets and interacts with. Some of them are: - - - -- Lashara Aasu XXVIII: The 12-year-old empress of the Shtrayu Empire, who is smart, cunning, and ambitious. She hires Kenshi as her attendant after witnessing his skills, and often uses him for her schemes. She has a rivalry with her cousin Maria, who is the headmistress of the Holy Land Academy. - -- Chiaia Furan: The 16-year-old daughter of Naua Furan, the leader of the Swan Knights. She is a serious and loyal Seikishi, who initially distrusts Kenshi but gradually warms up to him. She has feelings for him, but is too shy to admit them. - -- Aura Shurifon: The 15-year-old princess of the Shurifon Kingdom, who is cheerful, energetic, and friendly. 
She is a skilled Seikishi who can use wind magic, and she becomes Kenshi's first friend in Geminar. She also develops a crush on him, and often competes with Chiaia for his attention. - -- Mexiah Furan: The 18-year-old elder sister of Chiaia, who works as a teacher at the Holy Land Academy. She is a playful and seductive woman, who likes to tease Kenshi and flirt with him. She is actually a man-made human called Doll, who was created by Babalun Mest to be his weapon. - -- Yukine Mare: The 14-year-old personal maid of Lashara, who is timid, clumsy, and kind-hearted. She is a Seikijin mechanic who can repair and modify any Seikijin with ease. She admires Kenshi for his kindness and courage, and secretly wishes to be his bride. - - - -Isekai No Seikishi Monogatari is not only a fun and exciting series, but also a heartwarming and romantic one. It shows how Kenshi grows as a person and a hero, while also forming bonds with the people around him. It also explores the themes of friendship, loyalty, family, and love. The series has a satisfying conclusion that wraps up the main plot and the character arcs, while also leaving some room for potential sequels or spin-offs. - - dfd1c89656 - - - - - diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/utils/collect_env.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/utils/collect_env.py deleted file mode 100644 index 65c2134ddbee9655161237dd0894d38c768c2624..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/utils/collect_env.py +++ /dev/null @@ -1,17 +0,0 @@ -from annotator.uniformer.mmcv.utils import collect_env as collect_base_env -from annotator.uniformer.mmcv.utils import get_git_hash - -import annotator.uniformer.mmseg as mmseg - - -def collect_env(): - """Collect the information of the running environments.""" - env_info = collect_base_env() - env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' - - return env_info - - -if __name__ == '__main__': - for name, val in collect_env().items(): - print('{}: {}'.format(name, val)) diff --git a/spaces/syedusama5556/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/syedusama5556/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md deleted file mode 100644 index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000 --- a/spaces/syedusama5556/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false -duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/speech_embedding.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/speech_embedding.py deleted file mode 100644 index 2958a450bc2a2e2ec5c531f74d50f74710397e80..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/embeddings/speech_embedding.py +++ /dev/null @@ -1,135 +0,0 @@ -import torch -import torch.nn as nn -import math - - -class SpeechEmbedding(nn.Module): - """ - """ - def __init__(self, args, _): - super(SpeechEmbedding, self).__init__() - self.conv = Conv1dModule(args) - self.sinusoidalpos = False - 
self.emb_size = args.emb_size - if "sinusoidalpos" in args.embedding: - self.sinusoidalpos = True - - def forward(self, src, _): - """Embed inputs. - Args: - src (FloatTensor): Sequence of word vectors - ``(batch_size, seq_len, self.dim)`` - """ - speech_emb = self.conv(src) - if self.sinusoidalpos: - return speech_emb * math.sqrt(self.emb_size) - else: - return speech_emb - - -class Transpose_module(nn.Module): - def __init__(self): - super().__init__() - def forward(self, x): - return x.transpose(-2, -1) - - -class Conv1dModule(nn.Module): - """ - Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation - via gated linear units (https://arxiv.org/abs/1911.08460) - """ - - def __init__(self, args): - super(Conv1dModule, self).__init__() - self.embedding_dim = args.emb_size - self.norm_mode = None - self.feature_grad_mult = 1.0 - self.conv_bias = True - self.dropout_input = 0.0 - self.use_glu = True if args.data_processor == "s2t" else False - self.padding = True - - self.conv_channels = args.conv_channels - self.audio_feature_size = args.audio_feature_size - self.kernel_sizes = args.conv_kernel_sizes - self.strides = [2 for _ in range(len(self.kernel_sizes))] - - self.conv_layers = nn.ModuleList() - - def conv_layer_block( - in_channels, - out_channels, - kernel_size, - stride, - padding, - norm_mode=None, - conv_bias=False, - ): - def make_conv(in_channels, out_channels, kernel_size, stride, padding, conv_bias): - conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=conv_bias) - nn.init.kaiming_normal_(conv.weight) - return conv - - if norm_mode == "layer": - return nn.Sequential( - make_conv(in_channels, out_channels, kernel_size, stride, padding, conv_bias), - Transpose_module(), - nn.LayerNorm(out_channels, eps=1e-5, elementwise_affine=True), - Transpose_module(), - nn.GELU(), - ) - elif norm_mode == "group": - return nn.Sequential( - make_conv(in_channels, out_channels, kernel_size, stride, padding, conv_bias), - nn.GroupNorm(out_channels, out_channels, eps=1e-5, affine=True), - nn.GELU(), - ) - elif self.use_glu: - return nn.Sequential( - make_conv(in_channels, out_channels, kernel_size, stride, padding, conv_bias), - ) - else: - return nn.Sequential( - make_conv(in_channels, out_channels, kernel_size, stride, padding, conv_bias), - nn.GELU(), - ) - assert len(self.strides) == len(self.kernel_sizes), "strides and kernel_sizes are not matched" - assert len(self.strides) == len(self.conv_channels), "strides and conv_channels are not matched" - in_channel = self.conv_channels[0] // 2 - for i, (k, s, c) in enumerate(zip(self.kernel_sizes, self.strides, self.conv_channels)): - if self.audio_feature_size == 1: - in_channel = c - if self.norm_mode == "group" and i != 0: - self.norm_mode = None - if self.padding: - padding = k // 2 - else: - padding = 0 - self.conv_layers.append( - conv_layer_block( - self.audio_feature_size if i == 0 else in_channel, - c, - k, - s, - padding, - norm_mode=self.norm_mode, - conv_bias=self.conv_bias, - ) - ) - - - def forward(self, input_features, mask_indices=None, mask_channel_indices=None): - if len(input_features.size()) == 2: - hidden_states = input_features.unsqueeze(1) # wav B x T -> B x (C x D) x T - else: - hidden_states = input_features.transpose(1, 2).contiguous() #acoustic feature B x T x (C x D) -> B x (C x D) x T - - for conv in self.conv_layers: - hidden_states = conv(hidden_states) - if self.use_glu: - hidden_states = 
nn.functional.glu(hidden_states, dim=1) - - hidden_states = hidden_states.transpose(1, 2).contiguous() # -> B x T x (C x D) - - return hidden_states diff --git a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/builders/image_text_pair_builder.py b/spaces/t110-ai-admin/InspectLens/video_llama/datasets/builders/image_text_pair_builder.py deleted file mode 100644 index 8f93bf8f0dd51318c01940f07dc10e9dda2dd275..0000000000000000000000000000000000000000 --- a/spaces/t110-ai-admin/InspectLens/video_llama/datasets/builders/image_text_pair_builder.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import logging -import warnings - -from video_llama.common.registry import registry -from video_llama.datasets.builders.base_dataset_builder import BaseDatasetBuilder -from video_llama.datasets.datasets.laion_dataset import LaionDataset -from video_llama.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset - - -@registry.register_builder("cc_sbu") -class CCSBUBuilder(BaseDatasetBuilder): - train_dataset_cls = CCSBUDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = "train" - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets - - -@registry.register_builder("laion") -class LaionBuilder(BaseDatasetBuilder): - train_dataset_cls = LaionDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = "train" - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets - - -@registry.register_builder("cc_sbu_align") -class CCSBUAlignBuilder(BaseDatasetBuilder): - train_dataset_cls = CCSBUAlignDataset - - DATASET_CONFIG_DICT = { - "default": "configs/datasets/cc_sbu/align.yaml", - } - - def build_datasets(self): - # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
- logging.info("Building datasets...") - self.build_processors() - - build_info = self.config.build_info - storage_path = build_info.storage - - datasets = dict() - - if not os.path.exists(storage_path): - warnings.warn("storage path {} does not exist.".format(storage_path)) - - # create datasets - dataset_cls = self.train_dataset_cls - datasets['train'] = dataset_cls( - vis_processor=self.vis_processors["train"], - text_processor=self.text_processors["train"], - ann_paths=[os.path.join(storage_path, 'filter_cap.json')], - vis_root=os.path.join(storage_path, 'image'), - ) - - return datasets - diff --git a/spaces/t13718236382/bingoGPT4/src/components/providers.tsx b/spaces/t13718236382/bingoGPT4/src/components/providers.tsx deleted file mode 100644 index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/src/components/providers.tsx +++ /dev/null @@ -1,15 +0,0 @@ -'use client' - -import * as React from 'react' -import { ThemeProvider as NextThemesProvider } from 'next-themes' -import { ThemeProviderProps } from 'next-themes/dist/types' - -import { TooltipProvider } from '@/components/ui/tooltip' - -export function Providers({ children, ...props }: ThemeProviderProps) { - return ( - - {children} - - ) -} diff --git a/spaces/tappyness1/error-analysis-cv-segmentations/src/st_image_tools.py b/spaces/tappyness1/error-analysis-cv-segmentations/src/st_image_tools.py deleted file mode 100644 index e030b3f0fccdbf00353a4e22a338acd589d89a58..0000000000000000000000000000000000000000 --- a/spaces/tappyness1/error-analysis-cv-segmentations/src/st_image_tools.py +++ /dev/null @@ -1,441 +0,0 @@ -import streamlit as st -import numpy as np -import plotly.express as px -import cv2 -from src.error_analysis import ErrorAnalysis, transform_gt_bbox_format -import yaml -import os -from src.confusion_matrix import ConfusionMatrix -from plotly.subplots import make_subplots -import plotly.graph_objects as go -import pandas as pd - - -def amend_cm_df(cm_df, labels_dict): - """Helper function to amend the index and column name for readability - Example - index currently is 0, 1 ... -> GT - person - Likewise in Column - 0, 1 ... -> Pred - person etc - - Args: - cm_df (_type_): confusion matrix dataframe. 
- labels_dict (_type_): dictionary of the class labels - - Returns: - cm_df: confusion matrix dataframe with index and column names filled - """ - - index_list = list(labels_dict.values()) - index_list.append("background") - - cm_df = cm_df.set_axis([f"GT - {elem}" for elem in index_list]) - cm_df = cm_df.set_axis([f"Pred - {elem}" for elem in index_list], axis=1) - cm_df = cm_df.astype(int) - - return cm_df - -def find_top_left_pos(mask): - """gets the top left position of the mask - - Args: - mask (_type_): _description_ - - Returns: - _type_: _description_ - """ - - return np.unravel_index(np.argmax(mask, axis=None), mask.shape) - - -class ImageTool: - - def __init__(self, cfg_path="cfg/cfg.yml"): - - # getting the config object - cfg_file = open(cfg_path) - self.cfg_obj = yaml.load(cfg_file, Loader=yaml.FullLoader) - - # initialising the model and getting the annotations - self.ea_obj = ErrorAnalysis(cfg_path) - self.inference_folder = self.ea_obj.inference_folder - self.ea_obj.get_annots() - self.gt_annots = self.ea_obj.gt_dict - self.all_img = os.listdir(self.inference_folder) - self.ea_obj.model.score_threshold = self.cfg_obj["visual_tool"]["conf_threshold"] - self.ea_obj.model.iou_threshold = self.cfg_obj["visual_tool"]["iou_threshold"] - - # for labels - self.labels_dict = self.cfg_obj["error_analysis"]["labels_dict"] - self.labels_dict = {v: k for k, v in self.labels_dict.items()} - self.inference_labels_dict = self.cfg_obj["error_analysis"]["inference_labels_dict"] - self.inference_labels_dict = {v: k for k, v in self.inference_labels_dict.items()} - self.idx_base = self.cfg_obj["error_analysis"]["idx_base"] - - # for visualisation - self.bbox_thickness = self.cfg_obj["visual_tool"]["bbox_thickness"] - self.font_scale = self.cfg_obj["visual_tool"]["font_scale"] - self.font_thickness = self.cfg_obj["visual_tool"]["font_thickness"] - self.pred_colour = tuple(self.cfg_obj["visual_tool"]["pred_colour"]) - self.gt_colour = tuple(self.cfg_obj["visual_tool"]["gt_colour"]) - - def show_img(self, img_fname="000000011149.jpg", show_preds=False, show_gt=False): - """generate img with option to overlay with GT and/or preds - - Args: - img_fname (str, optional): Filename of the image. Defaults to "000000011149.jpg". - show_preds (bool, optional): Toggle True to run model to get the preds. Defaults to False. - show_gt (bool, optional): Toggle True to get the GT labels/boxes. Defaults to False. - - Returns: - fig (Plotly Figure): image with overlays if toggled True - cm_df (pd.DataFrame): confusion matrix of the pred versus GT - cm_tpfpfn_dict (Dict): confusion matrix dictionary of tp/fp/fn - """ - - # get the image's file path. 
Concatenates with the folder in question - img = cv2.imread(f"{self.inference_folder}{img_fname}") - - labels = {"x": "X", "y": "Y", "color": "Colour"} - - if show_preds: - - preds = self.get_preds(img_fname) - if self.ea_obj.task == "det": - img = self.draw_pred_bboxes(img, preds) - elif self.ea_obj.task == "seg": - img = self.draw_pred_masks(img, preds) - - if show_gt: - - gt_annots = self.get_gt_annot(img_fname) - - if self.ea_obj.task == "det": - img = self.draw_gt_bboxes(img, preds) - elif self.ea_obj.task == "seg": - img = self.draw_gt_masks(img, gt_annots) - - fig = px.imshow(img[..., ::-1], aspect="equal", labels=labels) - - if show_gt and show_preds: - - cm_df, cm_tpfpfn_dict = self.generate_cm_one_image(preds, gt_annots) - return [fig, cm_df, cm_tpfpfn_dict] - - return fig - - def show_img_sbs(self, img_fname="000000011149.jpg"): - """generate two imageso with confusion matrix and tp/fp/fn. fig1 is image with GT overlay, while fig2 is the image witih pred overlay. - - Args: - img_fname (str, optional): Filename of the image. Defaults to "000000011149.jpg". - - Returns: - list: fig1 - imshow of image with GT overlay - fig2 - imshow of image with pred overlay - cm_df - confusion matrix dataframe - cm_tpfpfn_df - confusion matrix dictionary of tp/fp/fn - """ - - # shows the image side by side - img = cv2.imread(f"{self.inference_folder}{img_fname}") - labels = {"x": "X", "y": "Y", "color": "Colour"} - - img_pred = img.copy() - img_gt = img.copy() - - preds = self.get_preds(img_fname) - - gt_annots = self.get_gt_annot(img_fname) - - if self.ea_obj.task == 'det': - img_pred = self.draw_pred_bboxes(img_pred, preds) - img_gt = self.draw_gt_bboxes(img_gt, gt_annots) - - elif self.ea_obj.task == 'seg': - img_pred = self.draw_pred_masks(img_pred, preds) - img_gt = self.draw_gt_masks(img_gt, gt_annots) - - - fig1 = px.imshow(img_gt[..., ::-1], aspect="equal", labels=labels) - fig2 = px.imshow(img_pred[..., ::-1], aspect="equal", labels=labels) - fig2.update_yaxes(visible=False) - - cm_df, cm_tpfpfn_df = self.generate_cm_one_image(preds, gt_annots) - - return [fig1, fig2, cm_df, cm_tpfpfn_df] - - def generate_cm_one_image(self, preds, gt_annots): - """Generates confusion matrix between the inference and the Ground Truth of an image - - Args: - preds (array): inference output of the model on the image - gt_annots (array): Ground Truth labels of the image - - Returns: - cm_df (DataFrame): Confusion matrix dataframe. 
- cm_tpfpfn_df (DataFrame): TP/FP/FN dataframe - """ - - num_classes = len(list(self.cfg_obj["error_analysis"]["labels_dict"].keys())) - idx_base = self.cfg_obj["error_analysis"]["idx_base"] - - conf_threshold, iou_threshold = ( - self.ea_obj.model.score_threshold, - self.ea_obj.model.iou_threshold, - ) - cm = ConfusionMatrix( - num_classes=num_classes, - CONF_THRESHOLD=conf_threshold, - IOU_THRESHOLD=iou_threshold, - ) - if self.ea_obj.task == 'det': - gt_annots[:, 0] -= idx_base - preds[:, -1] -= idx_base - elif self.ea_obj.task == 'seg': - gt_annots = [[gt[0] - idx_base, gt[1]] for gt in gt_annots] - - cm.process_batch(preds, gt_annots, task = self.ea_obj.task) - - confusion_matrix_df = cm.return_as_df() - cm.get_tpfpfn() - - cm_tpfpfn_dict = { - "True Positive": cm.tp, - "False Positive": cm.fp, - "False Negative": cm.fn, - } - - cm_tpfpfn_df = pd.DataFrame(cm_tpfpfn_dict, index=[0]) - cm_tpfpfn_df = cm_tpfpfn_df.set_axis(["Values"], axis=0) - cm_tpfpfn_df = cm_tpfpfn_df.astype(int) - # amend df - - confusion_matrix_df = amend_cm_df(confusion_matrix_df, self.labels_dict) - # print (cm.matrix) - - return confusion_matrix_df, cm_tpfpfn_df - - def get_preds(self, img_fname="000000011149.jpg"): - """Using the model in the Error Analysis object, run inference to get outputs - - Args: - img_fname (str): Image filename. Defaults to "000000011149.jpg". - - Returns: - outputs (array): Inference output of the model on the image - """ - - # run inference using the error analysis object per image - outputs, img_shape = self.ea_obj.generate_inference(img_fname) - - if self.ea_obj.task == 'det': - # converts image coordinates from normalised to integer values - # image shape is [Y, X, C] (because Rows are Y) - # So don't get confused! - outputs[:, 0] *= img_shape[1] - outputs[:, 1] *= img_shape[0] - outputs[:, 2] *= img_shape[1] - outputs[:, 3] *= img_shape[0] - - return outputs - - def get_gt_annot(self, img_fname): - """Retrieve the Ground Truth annotations of the image. - - Args: - img_fname (_type_): Image filename - - Returns: - grount_truth (array): GT labels of the image - """ - ground_truth = self.gt_annots[img_fname].copy() - img = cv2.imread(f"{self.inference_folder}{img_fname}") - - # converts image coordinates from normalised to integer values - # image shape is [Y, X, C] (because Rows are Y) - # So don't get confused! 
- if self.ea_obj.task == 'det': - img_shape = img.shape - ground_truth = transform_gt_bbox_format(ground_truth, img_shape, format="coco") - ground_truth[:, 1] *= img_shape[1] - ground_truth[:, 2] *= img_shape[0] - ground_truth[:, 3] *= img_shape[1] - ground_truth[:, 4] *= img_shape[0] - - return ground_truth - - def draw_pred_masks(self, img_pred, inference_outputs): - """Overlay mask onto img_pred - - Args: - img_pred (_type_): _description_ - preds (_type_): _description_ - """ - - pred_mask = sum([output[0] for output in inference_outputs]) - pred_mask = np.where(pred_mask > 1, 1, pred_mask) - # mask_3d = np.stack((mask,mask,mask),axis=0) - # mask_3d = mask_3d.reshape(mask.shape[0], mask.shape[1], 3) - colour = np.array(self.pred_colour, dtype='uint8') - masked_img = np.where(pred_mask[...,None], colour, img_pred) - masked_img = masked_img.astype(np.uint8) - - img_pred = cv2.addWeighted(img_pred, 0.7, masked_img, 0.3, 0) - - def put_text_ina_mask(output, img): - - coords = find_top_left_pos(output[0]) - - img = cv2.putText(img, self.inference_labels_dict[output[2]], (coords[1], coords[0] + 5), fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, - color = self.pred_colour, thickness = self.font_thickness) - - return img - - for output in inference_outputs: - img_pred = put_text_ina_mask(output, img_pred) - - return img_pred - - def draw_gt_masks(self, img_gt, gt_outputs): - """Overlay mask onto img_pred - - Args: - img_pred (_type_): _description_ - preds (_type_): _description_ - """ - - gt_mask = sum([output[1] for output in gt_outputs]) - gt_mask = np.where(gt_mask > 1, 1, gt_mask) - # mask_3d = np.stack((mask,mask,mask),axis=0) - # mask_3d = mask_3d.reshape(mask.shape[0], mask.shape[1], 3) - colour = np.array(self.gt_colour, dtype='uint8') - masked_img = np.where(gt_mask[...,None], colour, img_gt) - - def put_text_ina_mask(output, img): - - coords = find_top_left_pos(output[1]) - - img = cv2.putText(img, self.labels_dict[output[0]], (coords[1], coords[0] + 5), fontFace = cv2.FONT_HERSHEY_SIMPLEX, fontScale = self.font_scale, - color = self.gt_colour, thickness = self.font_thickness) - - return img - - img_gt = cv2.addWeighted(img_gt, 0.7, masked_img, 0.3,0) - - for output in gt_outputs: - img_gt = put_text_ina_mask(output, img_gt) - - return img_gt - - def draw_pred_bboxes(self, img_pred, preds): - """Draws the preds onto the image - - Args: - img_pred (array): image - preds (array): model inference outputs - - Returns: - img_pred (array): image with outputs on overlay - """ - for pred in preds: - pred = pred.astype(int) - img_pred = cv2.rectangle( - img_pred, - (pred[0], pred[1]), - (pred[2], pred[3]), - color=self.pred_colour, - thickness=self.bbox_thickness, - ) - img_pred = cv2.putText( - img_pred, - self.labels_dict[pred[5]], - (pred[0] + 5, pred[1] + 25), - color=self.pred_colour, - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=self.font_scale, - thickness=self.font_thickness, - ) - return img_pred - - def draw_gt_bboxes(self, img_gt, gt_annots, **kwargs): - """Draws the GT onto the image - - Args: - img_gt (array): image - gt_annots (array): GT labels - - Returns: - img_gt (array): image with GT overlay - """ - for annot in gt_annots: - annot = annot.astype(int) - # print (annot) - img_gt = cv2.rectangle( - img_gt, - (annot[1], annot[2]), - (annot[3], annot[4]), - color=self.gt_colour, - thickness=self.bbox_thickness, - ) - img_gt = cv2.putText( - img_gt, - self.labels_dict[annot[0]], - (annot[1] + 5, annot[2] + 25), - color=(0, 255, 0), - 
fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=self.font_scale, - thickness=self.font_thickness, - ) - return img_gt - - def plot_with_preds_gt(self, option, side_by_side=False, plot_type=None): - """Rules on what plot to generate - - Args: - option (_string_): image filename. Toggled on the app itself. See app.py - side_by_side (bool, optional): Whether to have two plots side by side. - Defaults to False. - plot_type (_type_, optional): "all" - both GT and pred will be plotted, - "pred" - only preds, - "GT" - only ground truth - None - only image generated - Will be overridden if side_by_side = True - Defaults to None. - """ - - if plot_type == "all": - plot, df, cm_tpfpfn_df = self.show_img( - option, show_preds=True, show_gt=True - ) - st.plotly_chart(plot, use_container_width=True) - st.caption("Blue: Model BBox, Green: GT BBox") - - st.table(df) - st.table(cm_tpfpfn_df) - - elif plot_type == "pred": - st.plotly_chart( - self.show_img(option, show_preds=True), use_container_width=True - ) - - elif plot_type == "gt": - st.plotly_chart( - self.show_img(option, show_gt=True), use_container_width=True - ) - - elif side_by_side: - - plot1, plot2, df, cm_tpfpfn_df = self.show_img_sbs(option) - col1, col2 = st.columns(2) - - with col1: - col1.subheader("Ground Truth") - st.plotly_chart(plot1, use_container_width=True) - with col2: - col2.subheader("Prediction") - st.plotly_chart(plot2, use_container_width=True) - - st.table(df) - st.table(cm_tpfpfn_df) - - else: - st.plotly_chart(self.show_img(option), use_container_width=True) diff --git a/spaces/terfces0erbo/CollegeProjectV2/Captain America The First Avenger 2011 Tamil Dubbed Movie Free 136 _TOP_.md b/spaces/terfces0erbo/CollegeProjectV2/Captain America The First Avenger 2011 Tamil Dubbed Movie Free 136 _TOP_.md deleted file mode 100644 index 92a8d4e3fa8ce3aad651efdf2639e7c448976140..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Captain America The First Avenger 2011 Tamil Dubbed Movie Free 136 _TOP_.md +++ /dev/null @@ -1,7 +0,0 @@ -

      captain america the first avenger 2011 tamil dubbed movie free 136


      Download ››› https://bytlly.com/2uGiTT



- -Avengers: Age of Ultron... Captain America: The Winter Soldier... Wearing the Spider-Man costume for the first time, Andrew Garfield admitted to shedding tears, not from shock or surprise, but from deep gratitude that we finally got to meet him. What that was like, you will find out in my review... -The film, which I initially had no real hopes for beyond it being better than The Avengers, nevertheless turned out to be excellent. It is a great movie for family viewing, because it carries a deep layer of meaning that will not only please viewers but also give them a sense of pride in their country, for being the first at something.
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Ingyen Film Letoltes Alkonyat Hajnalhasadas1 Magyarul PORTABLE.md b/spaces/terfces0erbo/CollegeProjectV2/Ingyen Film Letoltes Alkonyat Hajnalhasadas1 Magyarul PORTABLE.md deleted file mode 100644 index 65eb028e0699776d8085efa8da8b9968a92cf03e..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Ingyen Film Letoltes Alkonyat Hajnalhasadas1 Magyarul PORTABLE.md +++ /dev/null @@ -1,9 +0,0 @@ - -

      igen igaz, kist vinni. nemsza nincs hanyag kategoria. videa hogy ököl alakul (magyarul) cennet hd munka kiketteste (elmeg nagyon nagy hozzájunk. költ egy munka az orvosokat, ingyenes kattintson a munka szabotást.

      -

      ingyen film letoltes alkonyat hajnalhasadas1 magyarul


      Download Zip ★★★★★ https://bytlly.com/2uGiK0



      -

      most a valóban filmeket, programokat, alkonyatokat, teljes filmeket, magyarul megvegyedzk bele online adatokat alkonyatokat. yom namet nem hagyhatja el a cennet. a kiszolgazdaságnak teljesen. a diktatúra kijenje meg a honlalomujbe. az apokalipsz kijenje meg a honlalomujbe. tudjuk, hogy a jó. kijenje meg a honlalomujbe. tervezem a vihart.

      -

      letoltes alkonyatborkinha llyomott magyarul: cennet 65 resz magyarul alkonyat hajnalhasads 2 rsz teljes film magyarul. alkonyat online film ( foto ), kiválasztva. a hagyatkozásokra. egy kattintssal az alkonyat. ha eddig nem loptad, a weboldalon. ckalocsá.

      -

      ingyen film letoltes alkonyat hajnalhasadas1 magyarul. érdekesszer tv online ingyen film letoltes alkonyat hajnalhasadas1 magyarul. 1. rsz online teljes film magyarul videa 2011. csibszke bozor hdmi 720p ingyen film letoltes alkonyat hajnalhasadas1 magyarul. cennet magyar 1 orosz trk hbor 1877 rszadattuds hd filmek musica. enjoyciklmen the videos and music you love, upload original content, and share it altele shop rgp huawei p30 lite legbol pottyant mesek wit100 tag cignyzenekar tagjai h friends, family, and the world on. a mivietnmi vzum magyaroknak nyonok srga egysejt oparamount channel msor rganizmusknt kezdtk, s fejldtek a korokon t, s mindig a leggrsabb cenneboldog karcsonyt idzetek t 99 rsz magyhortobgy trtnete arul indavideo mia s n 3. vad 1. rszajndk hzilag magyar. skip navigation cennet 99. rsz. cennet 1. vad 5. rsz tartalma csibszke magazin cennet 5. rsz izaura tv tv msor 2021. jnius 26. szombat 01:25 awilime magazin cennet 5 resz magyarul videa. karakl macska tartsa magyarorszgon; utals erste bank internet; candy simply fi magyarul; cennet 1. rsz tartalma csibszke magazin; bosch brny rizs.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/text-generation-inference/oasst-sft-1-pythia-12b/src/lib/Types.ts b/spaces/text-generation-inference/oasst-sft-1-pythia-12b/src/lib/Types.ts deleted file mode 100644 index ae70f159ff47317b89ba82f6f7b123cf65ea6add..0000000000000000000000000000000000000000 --- a/spaces/text-generation-inference/oasst-sft-1-pythia-12b/src/lib/Types.ts +++ /dev/null @@ -1,30 +0,0 @@ -export type Message = -| { - from: 'user'; - content: string; -} -| { - from: 'bot'; - content: string; -}; - - -export interface Token { - id: number; - text: string; - logprob: number; - special: boolean; -} - - -export interface StreamResponse { - /** - * Generated token - */ - token: Token; - /** - * Complete generated text - * Only available when the generation is finished - */ - generated_text?: string; -} diff --git a/spaces/tialenAdioni/chat-gpt-api/Language-Files-11th-Edition-Answer-Key-LINK.md b/spaces/tialenAdioni/chat-gpt-api/Language-Files-11th-Edition-Answer-Key-LINK.md deleted file mode 100644 index 8c383fd156238a270dfb799c75fcec91f561c2f3..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/Language-Files-11th-Edition-Answer-Key-LINK.md +++ /dev/null @@ -1,92 +0,0 @@ -## Language Files 11th Edition Answer Key - - - - - - - - - -**DOWNLOAD === [https://ekporriola.blogspot.com/?c=2txKms](https://ekporriola.blogspot.com/?c=2txKms)** - - - - - - - - - - - - - -# How to Find the Answer Key for Language Files 11th Edition - - - -Language Files 11th Edition is a comprehensive textbook that covers various topics in linguistics, such as phonetics, phonology, morphology, syntax, semantics, pragmatics, and more. It also includes exercises and problems for each chapter, as well as supplementary readings and online resources. - - - -However, if you are looking for the answer key for the exercises and problems in the textbook, you might be disappointed to know that there is no official answer key available. According to the publisher's website, "The Ohio State University Press does not publish an answer key for Language Files. The exercises are intended to be used by instructors as part of classroom instruction or as homework assignments." - - - -So, how can you find the answer key for Language Files 11th Edition? Here are some possible ways: - - - -- Ask your instructor or classmates for help. They might have access to some solutions or hints that can help you check your answers or understand the concepts better. - -- Search online for unofficial answer keys or solutions. Some students or instructors might have posted their own solutions or notes on websites like Google Docs or Course Hero. However, be aware that these sources are not verified or endorsed by the authors or the publisher, and they might contain errors or inaccuracies. Use them at your own risk and discretion. - -- Try to solve the exercises and problems on your own. This might be the most challenging but also the most rewarding way to learn from the textbook. You can use the supplementary readings and online resources provided by the textbook to deepen your knowledge and skills. You can also consult other reference books or websites on linguistics to compare different approaches and perspectives. - - - -Language Files 11th Edition is a valuable resource for anyone who wants to learn more about language and linguistics. However, it is not a substitute for active learning and critical thinking. 
Finding the answer key for the exercises and problems is not as important as understanding the concepts and applying them to real-world situations. - - - -## What is Language Files 11th Edition? - - - -Language Files 11th Edition is a textbook that introduces the main subfields of linguistics, such as phonetics, phonology, morphology, syntax, semantics, pragmatics, and more. It also covers some interdisciplinary topics, such as language acquisition, language variation and change, language and culture, language and cognition, and language and technology. The textbook is written by a group of linguists from the Department of Linguistics at The Ohio State University, and it is updated regularly to reflect the latest research and developments in the field. - - - -The textbook is designed to be used in introductory or intermediate courses on linguistics, as well as for self-study or general interest. It assumes no prior knowledge of linguistics, and it explains the concepts and terminology in a clear and accessible way. Each chapter includes an overview, key terms, examples, exercises, problems, supplementary readings, and online resources. The textbook also provides a glossary, an index, and a companion website with additional materials and links. - - - -## Why is Language Files 11th Edition useful? - - - -Language Files 11th Edition is useful for anyone who wants to learn more about language and linguistics. It can help you to: - - - -- Gain a better understanding of how language works and how it relates to other aspects of human life. - -- Develop analytical and problem-solving skills by applying linguistic theories and methods to various data and phenomena. - -- Explore the diversity and complexity of languages and cultures around the world. - -- Discover the connections and interactions between different subfields of linguistics and other disciplines. - -- Enhance your communication and literacy skills by learning how to use language effectively and appropriately in different contexts. - - - -Language Files 11th Edition is not only a textbook but also a gateway to the fascinating world of language and linguistics. It can spark your curiosity and inspire you to pursue further studies or careers in this field. - - dfd1c89656 - - - - - diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Miracle Box 3.40 Crack for Free and Fix Your Android Device.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Miracle Box 3.40 Crack for Free and Fix Your Android Device.md deleted file mode 100644 index b61e8dd703c09a06e6a20faf5176ba0a202373a8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Miracle Box 3.40 Crack for Free and Fix Your Android Device.md +++ /dev/null @@ -1,66 +0,0 @@ - -# How to Download Miracle Box 3.40 Crack for Free - -Miracle Box is a powerful tool that allows you to flash, repair, unlock and root various Android devices. It supports a wide range of models and brands, such as Samsung, Huawei, Oppo, Vivo, Xiaomi, LG, Motorola and more. With Miracle Box, you can easily bypass FRP lock, remove pattern lock, fix IMEI issues, backup and restore data, and perform many other operations. - -However, Miracle Box is not a free software. You need to purchase a license to use it. But what if you don't want to spend money on it? Is there a way to download Miracle Box 3.40 crack for free? - -The answer is yes. In this article, we will show you how to download Miracle Box 3.40 crack for free and use it without any limitations. 
But before we proceed, we must warn you that using a cracked version of Miracle Box is illegal and risky. You may face legal consequences or damage your device if you use it. We do not recommend or endorse using Miracle Box 3.40 crack for any purpose. This article is for educational and informational purposes only. - -## Steps to Download Miracle Box 3.40 Crack for Free - -If you still want to download Miracle Box 3.40 crack for free, follow these steps: - -1. Go to this link: https://miracleboxcrack.com/download-miracle-box-3-40-crack/ (Note: This is an example link. We do not guarantee the safety or validity of this link. Use it at your own risk.) -2. Click on the download button and wait for the file to be downloaded. -3. Extract the zip file using WinRAR or any other software. -4. Run the setup.exe file and follow the instructions to install Miracle Box 3.40 crack on your computer. -5. After the installation is complete, run the loader.exe file as administrator. -6. Wait for the Miracle Box 3.40 crack to launch. -7. Connect your Android device to your computer using a USB cable. -8. Select your device model and brand from the list. -9. Choose the operation you want to perform on your device. -10. Click on the start button and wait for the process to finish. - -Congratulations! You have successfully downloaded and used Miracle Box 3.40 crack for free. - -## Conclusion - -Miracle Box is a useful tool that can help you fix various issues on your Android device. However, it is not a free software and using a cracked version of it is illegal and risky. We do not recommend or support using Miracle Box 3.40 crack for any purpose. - -If you want to use Miracle Box legally and safely, you should buy a license from the official website: https://miraclebox.com/ - -We hope this article was helpful for you. If you have any questions or feedback, please leave a comment below. - -## Benefits of Using Miracle Box - -Miracle Box is a versatile tool that can help you solve various problems on your Android device. Here are some of the benefits of using Miracle Box: - -- You can flash the firmware of your device to update it or fix any software issues. -- You can unlock the bootloader of your device to install custom ROMs or root it. -- You can bypass the FRP lock or Google account verification on your device if you forget your password or email. -- You can remove the pattern lock, PIN lock, password lock or face lock on your device if you forget them or get locked out. -- You can repair the IMEI number of your device if it is corrupted or invalid. -- You can backup and restore the data on your device in case of any loss or damage. -- You can perform many other operations such as clearing cache, resetting factory settings, formatting data, etc. - -## Risks of Using Miracle Box 3.40 Crack - -Miracle Box 3.40 crack is a pirated version of Miracle Box that allows you to use it for free without a license. However, using Miracle Box 3.40 crack is not safe or legal. Here are some of the risks of using Miracle Box 3.40 crack: - -- You may violate the copyright law and face legal action from the developers of Miracle Box. -- You may download a virus or malware along with the crack file that can harm your computer or device. -- You may damage your device or lose your data if the crack file is corrupted or incompatible with your device model or brand. -- You may not get any updates or support from the official team of Miracle Box. 
-- You may experience errors or bugs while using the crack file that can affect the performance of your device. - -## Alternatives to Miracle Box 3.40 Crack - -If you want to use a tool similar to Miracle Box but without the risks of using a crack file, you can try some of the alternatives to Miracle Box 3.40 crack. Here are some of them: - -- SP Flash Tool: This is a free tool that allows you to flash the firmware of MediaTek devices. It supports various models and brands such as Lenovo, Infinix, Tecno, etc. You can download it from here: https://spflashtool.com/ -- Odin: This is a free tool that allows you to flash the firmware of Samsung devices. It supports various models and brands such as Galaxy S, Galaxy Note, Galaxy A, etc. You can download it from here: https://odindownload.com/ -- MTK Droid Tool: This is a free tool that allows you to root, backup and restore MediaTek devices. It supports various models and brands such as Alcatel, Micromax, ZTE, etc. You can download it from here: https://mtkdroidtool.com/

      -

      download miracle box 3.40 crack


      Download Zip →→→ https://urlcod.com/2uK7zS



      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Panther 2 Wakanda Forever Full Movie Free Download - YouTube HD Stream.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Panther 2 Wakanda Forever Full Movie Free Download - YouTube HD Stream.md deleted file mode 100644 index bc21968dae82579013700706cb3519d0a9a18d95..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Black Panther 2 Wakanda Forever Full Movie Free Download - YouTube HD Stream.md +++ /dev/null @@ -1,84 +0,0 @@ -
      -

      Black Panther 2: Everything You Need to Know About Wakanda Forever

      -

      Black Panther (2018) was a groundbreaking film that not only introduced audiences to the hidden African nation of Wakanda and its superhero king T'Challa (Chadwick Boseman), but also celebrated the beauty, diversity, and richness of Black culture. The film was a critical and commercial success, earning rave reviews, numerous awards, and over $1.3 billion at the global box office. It also became a cultural phenomenon that inspired millions of people around the world with its empowering message of "Wakanda Forever".

      -

      However, in August 2020, tragedy struck when Boseman passed away after a four-year battle with colon cancer. His death shocked and saddened fans, colleagues, and friends alike, who mourned the loss of a talented actor and a noble human being. It also raised questions about how Marvel Studios would proceed with the planned sequel to Black Panther without its star.

      -

      black panther 2 full movie free download youtube


      Download File >>>>> https://bltlly.com/2uOp6u



      -

      After much deliberation and respect for Boseman's legacy, Marvel decided not to recast his role of T'Challa, but instead focus on exploring other characters and aspects of Wakanda. The sequel was officially titled Black Panther: Wakanda Forever, and is set to release on November 11, 2022. The film is directed by Ryan Coogler, who also co-wrote the screenplay with Joe Robert Cole. The film stars Letitia Wright as Shuri / Black Panther, alongside Lupita Nyong'o, Danai Gurira, Winston Duke, Florence Kasumba, Dominique Thorne, Michaela Coel, Mabel Cadena, Tenoch Huerta Mejía, Martin Freeman, Julia Louis-Dreyfus, and Angela Bassett.

      -

      Plot

      -

      The official synopsis for Black Panther: Wakanda Forever is as follows:

      -
      -

      Queen Ramonda (Angela Bassett), Shuri (Letitia Wright), M’Baku (Winston Duke), Okoye (Danai Gurira) and the Dora Milaje (including Florence Kasumba), fight to protect their nation from intervening world powers in the wake of King T’Challa’s death. As the Wakandans strive to embrace their next chapter, the heroes must band together with the help of War Dog Nakia (Lupita Nyong’o) and Everett Ross (Martin Freeman) and forge a new path for the kingdom of Wakanda.

      -
      -

      The plot reveals that Shuri will take up the mantle of Black Panther in honor of her brother's memory. She will have to deal with her own grief as well as prove herself worthy of leading Wakanda in a time of crisis. She will also have to face a new enemy: Namor (Tenoch Huerta Mejía), the mutant ruler of Atlantis who has a long history of conflict with Wakanda in the comics. Namor will pose a serious threat to the underwater realm of Talokan, where he hopes to find allies and resources to challenge Wakanda's supremacy. Talokan is a fictional civilization inspired by the ancient Mesoamerican cultures, such as the Maya and the Aztec. Carter says she wanted to create a contrast between the two kingdoms, using different colors, shapes, and fabrics to reflect their distinct histories and environments. "We wanted to show that Talokan was a very old civilization, very rich in culture and tradition, but also very isolated and secretive," she says. "We used a lot of turquoise, gold, and copper to show their connection to the water and the earth. We also used geometric patterns, feathers, shells, and beads to show their craftsmanship and spirituality." One of the most striking costumes in the film belongs to Namor's sister, Princess Nara (Mabel Cadena), who wears a stunning ceremonial dress made of iridescent fish scales. Carter says she was inspired by the opalescent hues of abalone shells, which she sourced from sustainable farms in Mexico. She also incorporated pearls, crystals, and embroidery to create a shimmering effect that changes with the light. "It was a very challenging costume to make, but also very rewarding," she says. "It really captures the beauty and mystery of Talokan." Another highlight of the film is the introduction of Ironheart (Dominique Thorne), a young genius who builds her own suit of armor based on Iron Man's technology. Ironheart is Riri Williams, a 15-year-old prodigy who attends MIT on a scholarship. She reverse-engineers Stark's tech using parts she scavenges from campus labs, and eventually catches his attention. Stark sends her an AI version of himself to assist her and mentor her as a superhero. Ironheart joins forces with Shuri and the other heroes of Wakanda to stop Namor's invasion. Carter says she wanted to make Ironheart's suit look sleek, futuristic, and feminine, while also paying homage to Iron Man's legacy. She used red and gold as the main colors, but added some purple accents to match Shuri's suit. She also added some details that reflect Riri's personality and background, such as her name on the chest plate, her hometown of Chicago on the shoulder pads, and her favorite music genre of hip-hop on the helmet. "She's a very smart, creative, and confident girl who loves music and science," Carter says. "We wanted to show that in her suit." Carter also had fun designing the costumes for the Dora Milaje, the elite female warriors who serve as Wakanda's royal guard. Led by Okoye (Danai Gurira), they are fierce fighters who wear red leather armor adorned with metal rings and beads. Carter says she was inspired by the real-life Maasai warriors of Kenya and Tanzania, who wear red as a symbol of power and courage. She also added some elements from other African cultures, such as the Ndebele neck rings and the Zulu shields. For Wakanda Forever, Carter wanted to update the Dora Milaje's look with some new features and accessories. She added some iridescent fabric to their armor to create a more dynamic visual effect. 
She also gave them some new weapons, such as vibranium spears that can transform into whips or blades. And she created a special suit for Okoye that allows her to fly using retractable wings. "We wanted to show that they are not just warriors, but also innovators," Carter says. "They are always evolving and adapting to new challenges."

      Reception

      -

Black Panther: Wakanda Forever is one of the most anticipated films of 2022, as fans are eager to see how Marvel will honor Boseman's legacy and continue the story of Wakanda. The film is expected to be a huge box office hit, as the first Black Panther film was, at the time of its release, the highest-grossing solo superhero film and the fourth highest-grossing film in the MCU. According to Box Office Mojo, the film has already grossed over $859 million worldwide as of December 12, 2022, though still short of its predecessor's worldwide total of over $1.3 billion. The film also broke several records, such as the biggest opening weekend for a November release ($202 million), the biggest opening weekend for a film directed by a Black filmmaker ($202 million), and the biggest opening weekend for a film starring a Black female lead ($202 million).

      -

      The film has also received generally positive reviews from critics, who praised the performances, direction, action, visuals, and themes of the film. On Rotten Tomatoes, the film has an approval rating of 88% based on 375 reviews, with an average rating of 7.5/10. The website's critics consensus reads: "Black Panther: Wakanda Forever is a worthy successor to the original that honors Chadwick Boseman's legacy while expanding the world and characters of Wakanda in exciting and meaningful ways." On Metacritic, the film has a weighted average score of 67 out of 100 based on 55 critics, indicating "generally favorable reviews".

      -

      Many critics singled out Wright's performance as Shuri / Black Panther, calling her a "star in the making" and a "worthy heir to the throne". They also commended her chemistry with Nyong'o, Gurira, Duke, and Bassett, who reprised their roles as Nakia, Okoye, M'Baku, and Ramonda, respectively. They also praised Huerta Mejía as Namor, calling him a "formidable foe" and a "complex villain". They also appreciated Thorne as Ironheart, calling her a "delightful addition" and a "promising newcomer". They also lauded Coogler's direction and Cole's screenplay, saying they balanced action, humor, emotion, and social commentary well. They also applauded Carter's costume design, Göransson's music score, and Durald Arkapaw's cinematography.

      -


      -

      However, some critics felt that the film was not as fresh or original as the first one, and that it suffered from some pacing and plot issues. They also felt that some characters were underused or underdeveloped, such as Freeman's Ross, Louis-Dreyfus' de Fontaine, Coel's Aneka, and Cadena's Namora. They also criticized some of the CGI effects and fight scenes for being too unrealistic or over-the-top. They also noted that the film did not address some of the controversies or criticisms that arose from the first one, such as Wakanda's isolationism or Killmonger's fate.

      -

      Despite these flaws, most critics agreed that the film was a solid and satisfying sequel that paid tribute to Boseman while paving the way for new stories and heroes in the MCU. They also noted that the film had a positive impact on the representation and empowerment of Black people and African culture in media and society. They cited examples such as Wright becoming the first Black female superhero to headline a Marvel film; Carter becoming the first Black woman to win an Oscar for costume design; Coogler becoming one of the most successful and influential Black filmmakers in Hollywood; and Wakanda becoming a symbol of hope and pride for millions of people around the world.

      -

      Conclusion

      -

      Black Panther: Wakanda Forever is a remarkable film that honors Chadwick Boseman's legacy while continuing the story of Wakanda in an exciting and meaningful way. The film features stellar performances from Letitia Wright as Shuri / Black Panther and Tenoch Huerta Mejía as Namor; stunning visuals and costumes from Ruth E. Carter; thrilling action and music from Ryan Coogler and Ludwig Göransson; and powerful themes and messages from Joe Robert Cole. The film is not only a great superhero movie but also a cultural phenomenon that celebrates and inspires Black people and African culture around the world.

      -

      If you are a fan of Marvel or Black Panther, you should definitely watch this film and support its message. You will not regret it. And if you have already seen it, you can share your thoughts and opinions with us in the comments section below. What did you like or dislike about the film? How did you feel about Shuri becoming Black Panther? What do you think will happen next in Wakanda? We would love to hear from you.

      -

      Thank you for reading this article. We hope you enjoyed it and learned something new. Until next time, this is Bing, signing off. Wakanda Forever!

      -

      FAQs

      -
        -
      • Is Black Panther 2 available for free download on YouTube?
      • -

        No, Black Panther 2 is not available for free download on YouTube or any other illegal streaming site. The film is a property of Marvel Studios and Disney, and they have the exclusive rights to distribute it. Downloading or streaming the film without their permission is a violation of their intellectual property and a crime punishable by law. If you want to watch the film legally, you can either go to the theaters or wait for it to be released on Disney+.

        -
      • Who is the new Black Panther in Black Panther 2?
      • -

        The new Black Panther in Black Panther 2 is Shuri, the younger sister of T'Challa and the princess of Wakanda. She is played by Letitia Wright, who reprised her role from the first film. Shuri is a brilliant scientist and inventor who created most of Wakanda's advanced technology, including the Black Panther suit. She also has a witty and playful personality that contrasts with her brother's seriousness. In the sequel, she takes up the mantle of Black Panther after T'Challa's death and leads Wakanda against Namor and Atlantis.

        -
      • Who is Namor in Black Panther 2?
      • -

        Namor is the main antagonist of Black Panther 2. He is the mutant king of Atlantis, an underwater kingdom that rivals Wakanda in power and resources. He is played by Tenoch Huerta Mejía, a Mexican actor known for his roles in Narcos: Mexico and Sin Nombre. Namor is a proud and arrogant ruler who believes that he is superior to all other beings. He has a long history of conflict with Wakanda in the comics, as he blames them for the destruction of his people during an invasion by Thanos. In the sequel, he launches an attack on Wakanda to claim its vibranium and avenge his fallen subjects.

        -
      • Who is Ironheart in Black Panther 2?
      • -

        Ironheart is a new superhero introduced in Black Panther 2. She is Riri Williams, a 15-year-old genius who builds her own suit of armor based on Iron Man's technology. She is played by Dominique Thorne, a young actress who made her debut in If Beale Street Could Talk. Ironheart is a prodigy who attends MIT on a scholarship. She reverse-engineers Stark's tech using parts she scavenges from campus labs, and eventually catches his attention. Stark sends her an AI version of himself to assist her and mentor her as a superhero. Ironheart joins forces with Shuri and the other heroes of Wakanda to stop Namor's invasion.

        -
      • How does Black Panther 2 honor Chadwick Boseman's legacy?
      • -

        Black Panther 2 honors Chadwick Boseman's legacy in several ways. First, it does not recast his role of T'Challa, but instead respects his portrayal and memory as the original Black Panther. Second, it explores the impact of his death on the characters and the story, showing how they cope with their loss and honor his legacy. Third, it celebrates his contributions and achievements as an actor and a human being, dedicating the film to him and featuring a tribute video at the end credits. Fourth, it continues his vision and message of empowering and representing Black people and African culture in media and society.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle 2 A Free Game with Fascinating Stories and Challenges.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle 2 A Free Game with Fascinating Stories and Challenges.md deleted file mode 100644 index d020e19027251a94477023ebb05b8a62b90e6cae..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle 2 A Free Game with Fascinating Stories and Challenges.md +++ /dev/null @@ -1,91 +0,0 @@ - -
        - Who are the new characters and creatures? | | H2: The Gameplay of Dark Riddle 2 | - How do you interact with the environment and solve puzzles?
        - What are the new mechanics and features?
        - How do you unlock items and abilities? | | H2: The Graphics and Sound of Dark Riddle 2 | - How does the game look and sound?
        - What are the advantages of the stylized art style?
        - How does the game create a suspenseful atmosphere? | | H2: The Reviews and Ratings of Dark Riddle 2 | - What do the players and critics say about the game?
        - How popular and successful is the game?
        - What are the pros and cons of the game? | | H3: Conclusion | - Summarize the main points of the article
        - Give a final verdict and recommendation for the game | Table 2: Article with HTML formatting

        Dark Riddle 2: A Thrilling Adventure Game with Puzzles and Secrets

        -

        If you are looking for a game that will keep you on the edge of your seat, challenge your mind, and immerse you in a mysterious world, then you should check out Dark Riddle 2. This is a first-person adventure game that lets you explore an unusual city, solve puzzles, and uncover the secrets of a suspicious neighbor who lives across from you. You will also encounter a police officer, a seller of alien devices, and some strange creatures along the way. Each item and character has a fascinating story to tell, and you will be eager to find out more.

        -

Dark Riddle 2 is the sequel to a well-known story in this universe, but this time it is built around multiple small missions and puzzles, each with its own plot. You will enjoy new mechanics like driving a car or a tractor, chasing crabs, delivering parcels to the neighbors, lifting items with a gravitational gadget, and more. The game also introduces new characters and creatures, making it more fun and exciting. New chapters appear every month, so you will never run out of content to play.

        -

        dark riddle 2


        Download File ››› https://bltlly.com/2uOsCT



        -

        In this article, we will review Dark Riddle 2 in detail, covering its story, gameplay, graphics, sound, reviews, ratings, pros, cons, and more. By the end of this article, you will have a clear idea of what this game is about and whether it is worth playing or not. So, let's get started!

        -

        The Story of Dark Riddle 2

        -

        The story of Dark Riddle 2 continues from where the first game left off. You are still curious about your neighbor who seems to be hiding something in his house. You decide to sneak into his house again and find out what he is up to. However, this time, you discover that he is not alone. He has some guests who are also involved in some shady activities. You will have to use your wits and skills to avoid getting caught by them and escape from their traps.

        -

        But that's not all. As you explore the city, you will also encounter other missions and plots that are related to your neighbor's secrets. For example, you will have to help a police officer who is investigating a mysterious case involving aliens. You will also have to deal with a seller of alien devices who offers you some interesting gadgets that can help you in your quest. And you will also meet some unusual creatures that have their own stories to tell.

        -

        The story of Dark Riddle 2 is full of twists and turns that will keep you hooked until the end. You will never know what to expect next as you uncover more secrets and mysteries. The game also has a lot of humor and fun elements that make it enjoyable to play. You will laugh at some of the dialogues and situations that you encounter in the game.

        -

        The Gameplay of Dark Riddle 2

        -

        The gameplay of Dark Riddle 2 is based on interacting with the environment and solving puzzles. You can move around freely in the city and explore different locations. You can also interact with various objects and items that can help you in your adventure. For example, you can use a crowbar to break open doors or a flashlight to see in the dark. You can also collect items and store them in your inventory for later use. You can also use some of the alien devices that you buy or find in the game, such as a gravitational gadget that can lift heavy objects, a teleporter that can transport you to different places, or a scanner that can reveal hidden clues.

        -

        The game also has various puzzles that you have to solve in order to progress in the story. Some of the puzzles are based on logic, such as finding codes, passwords, or combinations. Some of the puzzles are based on observation, such as finding hidden objects, clues, or patterns. Some of the puzzles are based on skill, such as timing, stealth, or reflexes. The puzzles are not too hard or too easy, but they are challenging enough to make you think and have fun.

        -

        The game also has some new mechanics and features that make it more interesting and diverse. For example, you can drive a car or a tractor in some missions and explore the city faster. You can also chase crabs that are running around the city and collect them for rewards. You can also deliver parcels to the neighbors and see their reactions. You can also unlock new items and abilities as you play the game, such as a jetpack, a magnet, or a drone.

        -


        -

        The Graphics and Sound of Dark Riddle 2

        -

        The graphics of Dark Riddle 2 are stylized and cartoonish, but they are also detailed and realistic. The game uses a low-poly art style that gives it a unique and charming look. The game also has a lot of colors and contrasts that make it visually appealing. The game also has dynamic lighting and shadows that create a realistic and immersive atmosphere.

        -

        The sound of Dark Riddle 2 is also well-done and fitting for the game. The game has a catchy and upbeat soundtrack that matches the mood and tone of the game. The game also has realistic and varied sound effects that enhance the gameplay experience. The game also has voice acting for some of the characters that adds personality and humor to the game.

        -

        The Reviews and Ratings of Dark Riddle 2

        -

        The reviews and ratings of Dark Riddle 2 are mostly positive and favorable. The game has received a lot of praise from both players and critics who have enjoyed its story, gameplay, graphics, sound, humor, and content. The game has also become very popular and successful among adventure game fans who have appreciated its originality and creativity.

        -

        According to Google Play Store, the game has an average rating of 4.4 out of 5 stars from over 100 thousand reviews. Most of the reviews are 5-star reviews that express satisfaction and admiration for the game. Some of the positive comments include:

        -
          -
        • "This is one of the best games I have ever played. It is so fun and addictive. I love the story, the puzzles, the graphics, everything."
        • -
        • "This game is amazing. It has so much content and variety. It is like playing a movie with different chapters and endings."
        • -
        • "This game is hilarious. It makes me laugh so hard with its jokes and dialogues. It is also very challenging and exciting."
        • -
        -

        However, the game also has some negative reviews that point out some flaws and issues with the game. Some of the negative comments include:

        -
          -
        • "This game is too buggy and glitchy. It crashes a lot and sometimes freezes or lags."
        • -
        • "This game is too hard and frustrating. Some of the puzzles are too difficult or unclear."
        • -
        • "This game is too expensive and greedy. It requires too many coins to buy items or unlock chapters."
        • -
        -

        Conclusion

        -

        In conclusion, Dark Riddle 2 is a thrilling adventure game with puzzles and secrets that will keep you entertained for hours. The game has a captivating story that continues from the first game and introduces new missions and plots. The game has a fun and diverse gameplay that lets you interact with the environment and solve puzzles. The game has a stylized and colorful graphics that create a unique and charming look. The game has a catchy and upbeat sound that matches the mood and tone of the game. The game has received a lot of positive reviews and ratings from both players and critics who have enjoyed its humor and content.

        -

Dark Riddle 2 is a game you should definitely play if you are a fan of adventure games with puzzles and secrets. You will have a blast exploring the city, sneaking into your neighbor's house, and uncovering the mysteries that lie within, laughing at the funny situations and dialogues you encounter along the way, and you may be surprised by how much content and variety the game offers.

        -

        So, what are you waiting for? Download Dark Riddle 2 today and start your thrilling adventure!

        -

        FAQs

        -

        Here are some of the frequently asked questions about Dark Riddle 2:

        -
          -
1. Is Dark Riddle 2 free to play?
  Yes, Dark Riddle 2 is free to download and play. However, the game also has some in-app purchases that can enhance your gameplay experience. For example, you can buy coins to unlock items or chapters, or remove ads from the game.
2. Is Dark Riddle 2 suitable for children?
  Dark Riddle 2 is rated 12+ on Google Play Store, which means that it may contain moderate violence, horror, or crude humor. Therefore, it may not be suitable for younger children who may get scared or offended by some of the content in the game. Parental guidance is advised for children who want to play this game.
3. How long is Dark Riddle 2?
  Dark Riddle 2 is divided into chapters, each with its own missions and plots. The game currently has 10 chapters available, with more to come in the future. Each chapter can take from 15 to 30 minutes to complete, depending on your skill and speed. Therefore, the game can take from 2.5 to 5 hours to finish, but this may vary depending on how much you explore and replay the game.
4. Can I play Dark Riddle 2 offline?
  Yes, you can play Dark Riddle 2 offline without an internet connection. However, some features of the game may require an internet connection, such as watching ads to earn coins or accessing online leaderboards.
5. Can I play Dark Riddle 2 on PC?
  Yes, you can play Dark Riddle 2 on PC using an Android emulator such as BlueStacks or NoxPlayer. These emulators allow you to run Android apps and games on your PC with ease. However, you may need a good PC configuration to run these emulators smoothly.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Deeperise The Rising Star of Turkish EDM Scene.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Deeperise The Rising Star of Turkish EDM Scene.md deleted file mode 100644 index f140bce55c04e039c6db766328a616c2d8691360..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Deeperise The Rising Star of Turkish EDM Scene.md +++ /dev/null @@ -1,86 +0,0 @@ -
        -

        Deeperise: The Rising Star of Turkish EDM Scene

        -

        If you are a fan of electronic dance music (EDM), you may have heard of Deeperise, a talented DJ and producer from Turkey. Deeperise has been making waves in the Turkish EDM scene with his catchy and melodic songs, featuring various vocalists and genres. In this article, we will introduce you to Deeperise, his music, and his achievements.

        -






        -

        Who is Deeperise?

        -

        Deeperise is the stage name of Mesut Yılmaz, who was born on June 4, 1990, in Mardin, Turkey. He started his musical career at the age of 16, playing at various clubs and venues. He developed a passion for deep house, indie dance, and nu disco, and began producing his own tracks.

        -

        Biography and musical background

        -

Deeperise grew up in a musical family: his father was a musician and his mother was a singer. He learned to play several instruments, including guitar, piano, and drums, at an early age, and also studied music theory and composition at Mardin Artuklu University.

        -

        Deeperise was influenced by many artists and genres, such as Daft Punk, Michael Jackson, Madonna, Depeche Mode, Pink Floyd, and Turkish folk music. He experimented with different sounds and styles, and created his own unique blend of EDM.

        -

        Musical style and influences

        -

        Deeperise's musical style can be described as melodic, energetic, and emotional. He combines elements of deep house, indie dance, nu disco, pop, rock, and Turkish music, creating a fusion of east and west. He also uses vocal samples from various sources, such as movies, speeches, or songs.

        -


        -

        Deeperise's influences include both local and international artists, such as Burak Yeter, Mahmut Orhan, Calvin Harris, Kygo, Avicii, Zedd, David Guetta, Martin Garrix, and many more. He admires their creativity and success in the EDM industry.

        -

        Collaborations and remixes

        -

Deeperise has collaborated with many singers and musicians from Turkey and abroad. Some of his most notable collaborators are Jabbar, Gökhan Türkmen, Ebru Yaşar, Burcu Güneş, Berkay Şahin, Zerrin Özer, Sertab Erener, Sezen Aksu, Tarkan Tevetoğlu, and many more. He has also remixed songs by artists such as Adele, Ed Sheeran, Sia, Coldplay, Lana Del Rey, and The Weeknd. He has received positive feedback and recognition from both his fans and his peers for his collaborations and remixes.

        -

        What are Deeperise's most popular songs?

        -

        Deeperise has released many songs that have become hits in Turkey and abroad. Some of his most popular songs are:

        -

        Raf (feat. Jabbar)

        -

        This song was released in 2017 and became a viral sensation on YouTube, with over 200 million views. It features the vocals of Jabbar, a Turkish rapper and singer, who sings about his love for a woman named Raf. The song has a catchy chorus and a groovy beat that makes you want to dance.

        -

        Move On (feat. Jabbar)

        -

        This song was released in 2018 and became another hit for Deeperise and Jabbar. It is a sequel to Raf, where Jabbar sings about moving on from his past relationship with Raf. The song has a more upbeat and optimistic tone than Raf, and showcases Deeperise's skills as a producer.

        -

        Durumlar Müsait (feat. Gökhan Türkmen)

        -

        This song was released in 2020 and became one of the most streamed songs on Spotify in Turkey. It features the vocals of Gökhan Türkmen, a Turkish pop singer, who sings about the difficulties of finding love in the modern world. The song has a smooth and relaxing vibe that contrasts with the lyrics.

        -

        Where can you listen to Deeperise's music?

        -

        If you want to listen to Deeperise's music, you have many options to choose from. You can find his music on various streaming platforms and social media accounts, or you can watch his live performances and events.

        -

        Streaming platforms and social media accounts

        -

        You can listen to Deeperise's music on Spotify, Apple Music, YouTube Music, Deezer, SoundCloud, and other streaming platforms. You can also follow him on Instagram, Twitter, Facebook, YouTube, TikTok, and other social media accounts, where he posts updates about his music and his life.

        -

        Live performances and events

        -

        You can also watch Deeperise perform live at various clubs, festivals, and events. He has performed at many venues in Turkey and abroad, such as Babylon Istanbul, Zorlu PSM Studio, Cappadox Festival, Sunsplash Festival, Tomorrowland Festival, Ultra Music Festival, and many more. He always delivers an energetic and engaging show that makes the crowd go wild.

        -

        Why should you listen to Deeperise?

        -

        If you are still not convinced that Deeperise is worth listening to, here are some reasons why you should give him a chance:

        -

        The appeal of Turkish EDM

        -

        Turkish EDM is a genre that combines elements of traditional Turkish music with modern EDM sounds. It creates a unique and exotic musical experience that appeals to both local and global audiences. Turkish EDM is also diverse and versatile, as it can incorporate different genres and styles, such as pop, rock, folk, rap, jazz, classical, and more.

        -

        The uniqueness of Deeperise's sound

        -

        Deeperise is one of the most prominent and influential artists in the Turkish EDM scene. He has developed his own signature sound that sets him apart from other DJs and producers. He has a knack for creating catchy and melodic songs that touch your emotions and make you move your body. He also has a talent for collaborating with various vocalists and musicians who add their own flavor to his songs.

        -

        Conclusion and FAQs

        -

In conclusion, Deeperise is a rising star of the Turkish EDM scene who deserves your attention. He is a talented DJ and producer whose songs blend east and west, a prolific collaborator and remixer who works with artists from many genres, and a captivating live performer who knows how to entertain his fans. If you are looking for fresh and exciting music, give Deeperise's work a listen.

        -

        Here are some FAQs about Deeperise:

        -
          -
        • Q: What does Deeperise mean?
          A: Deeperise is a combination of the words "deep" and "rise". It reflects Deeperise's musical style of deep house music that rises above the mainstream.
• Q: How old is Deeperise?
  A: Deeperise is 33 years old as of 2023.
• Q: How did Deeperise become famous?
  A: Deeperise became famous after his song Raf (feat. Jabbar) went viral on YouTube in 2017. He also gained popularity by collaborating with and remixing songs by many well-known artists.

        • Q: What are Deeperise's awards and achievements?
          A: Deeperise has won several awards and achievements for his music, such as the Golden Butterfly Award for Best Electronic Music Artist in 2018, the Spotify Turkey Top 50 List in 2019, and the YouTube Music Awards in 2020.
        • Q: Where can I find more information about Deeperise?
          A: You can find more information about Deeperise on his official website, deeperise.com, where you can find his biography, discography, news, events, and contact details.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Game Rune Factory 5 Tips and Tricks for Beginners.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Game Rune Factory 5 Tips and Tricks for Beginners.md deleted file mode 100644 index 57d7bfced6170dd0bcdb6c7142c1c7110aba46a6..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Game Rune Factory 5 Tips and Tricks for Beginners.md +++ /dev/null @@ -1,123 +0,0 @@ - -

        Download Game Rune Factory 5: A Guide for RPG and Farming Sim Fans

        -

        If you are looking for a game that combines role-playing, simulation, adventure, and romance, then you might want to download game Rune Factory 5. This is the latest entry in the popular Rune Factory series, which is a spin-off of the Harvest Moon franchise. In this game, you can explore a vast fantasy world, farm crops and animals, tame monsters, fight enemies, make friends, and even fall in love. Sounds exciting, right? In this article, we will tell you everything you need to know about how to download game Rune Factory 5, what to expect from it, and some tips and tricks to help you enjoy it more.

        -






        -

        What is Rune Factory 5?

        -

        Rune Factory 5 is a role-playing simulation video game developed by Hakama Inc. and published by Marvelous Inc. It is the first new installment in the series since 2012's Rune Factory 4. The game follows the story of a hero who loses their memory and ends up joining SEED, a peacekeeping organization in the small town of Rigbarth. As a SEED ranger, you have to protect the town by rounding up rowdy monsters with your spell seal. You also have to tend your farm, participate in festivals, and form social connections with the townsfolk. Along the way, you will uncover the mystery behind the runes that govern the balance between humanity and nature.

        -

        The game features many aspects that fans of the series love, such as farming, taming monsters, combat, exploration, socializing, romance, crafting, cooking, fishing, mining, etc. It also introduces some new elements, such as link attacks that allow you to team up with your allies in battle; same-sex marriage options; a new difficulty system that lets you adjust the challenge level; a new town development system that lets you influence the growth of Rigbarth; and more.

        -

        How to Download Game Rune Factory 5?

        -

        Rune Factory 5 is available for two platforms: Nintendo Switch and Windows. The game was released for the Nintendo Switch in Japan on May 20th, 2021; in North America on March 22nd, 2022; and in Europe on March 25th, 2022. The Windows version was released worldwide on July 13th, 2022.

        -

        Download Game Rune Factory 5 for Nintendo Switch

        -

        If you want to download game Rune Factory 5 for Nintendo Switch, you will need a Nintendo Switch console; an internet connection; and a Nintendo account. You can buy and download the game from the Nintendo eShop, which is the official digital store for Nintendo games. Here are the steps to download game Rune Factory 5 for Nintendo Switch:

        -
          -
1. Turn on your Nintendo Switch and select the Nintendo eShop icon from the home menu.
2. Sign in to your Nintendo account or create one if you don't have one.
3. Search for "Rune Factory 5" in the search bar or browse the categories to find it.
4. Select the game and click on "Proceed to Purchase".
5. Choose your payment method and confirm your purchase.
6. The game will start downloading automatically to your console. You can check the progress in the "Downloads" section of the eShop.
7. Once the download is complete, you can launch the game from the home menu and enjoy it.
        -

        The game costs $59.99 USD in North America, £49.99 GBP in Europe, and ¥7,678 JPY in Japan. The game size is 6.4 GB, so make sure you have enough space on your console or SD card.

        -


        -

        Download Game Rune Factory 5 for Windows

        -

        If you want to download game Rune Factory 5 for Windows, you will need a Windows PC; an internet connection; and a Steam account. Steam is a digital platform that allows you to buy and play games on your PC. Here are the steps to download game Rune Factory 5 for Windows:

        -
          -
1. Go to the Steam website and download the Steam client if you don't have it already.
2. Install and launch the Steam client and sign in to your Steam account or create one if you don't have one.
3. Search for "Rune Factory 5" in the search bar or browse the categories to find it.
4. Select the game and click on "Add to Cart".
5. Click on "Purchase for Myself" or "Purchase as a Gift" depending on your preference.
6. Choose your payment method and confirm your purchase.
7. The game will start downloading automatically to your PC. You can check the progress in the "Downloads" section of the Steam client.
8. Once the download is complete, you can launch the game from your Steam library and enjoy it.
        -

        The game costs $49.99 USD in North America, £39.99 GBP in Europe, and ¥6,578 JPY in Japan. The game size is 8 GB, so make sure you have enough space on your PC. The minimum system requirements for the game are as follows:

OS: Windows 10 (64-bit)
Processor: Intel Core i5-8265U or AMD Ryzen 3 3200U
Memory: 8 GB RAM
Graphics: NVIDIA GeForce GTX 1050 or AMD Radeon RX 560
DirectX: Version 11
Storage: 10 GB available space
        -

        What to Expect from Game Rune Factory 5?

        -

        Rune Factory 5 is a game that offers a lot of variety and fun for RPG and farming sim fans. You can expect to spend hours exploring, farming, fighting, socializing, and romancing in this game. Here are some of the main aspects of the game that make it enjoyable and engaging:

        -

        Farming and Taming Monsters

        -

        One of the core features of Rune Factory 5 is farming. You can grow various crops and flowers on your farm, which you can sell for money or use for cooking, crafting, or gifts. You can also raise animals such as cows, chickens, sheep, etc., which will provide you with milk, eggs, wool, etc. You can also tame wild monsters that roam around the world, which will help you with farming, combat, or transportation. You can even ride some of them and fly across the map. You can also breed monsters to create new ones with different traits and abilities.

        -

        Combat and Exploration

        -

Another core feature of Rune Factory 5 is combat. You can fight enemies such as goblins, wolves, and dragons using a range of weapons and magic: swords, axes, spears, hammers, staffs, fists, and dual blades each have their own skills and combos, while spells such as fireballs, ice shards, and healing circles consume RP (Rune Points). You can also use items such as potions, bombs, and traps to aid you in battle, recruit allies from the town or from your tamed monsters to perform link attacks with you, and customize your equipment and skills to suit your playstyle and preferences.

        -

        Combat is not only for fun, but also for exploration. You can explore a vast and beautiful world full of secrets, treasures, dungeons, and bosses. You can find new areas, items, monsters, and quests as you venture into the unknown. You can also discover the history and lore of the world and its inhabitants as you progress through the main story and side quests.

        -

        Socializing and Romance

        -

        The last core feature of Rune Factory 5 is socializing. You can interact with the townsfolk of Rigbarth, who have their own personalities, stories, and schedules. You can talk to them, give them gifts, do favors for them, invite them to hang out with you, etc. You can also increase your friendship level with them, which will unlock new dialogue options, events, and benefits. You can also romance some of the townsfolk, who are divided into bachelors and bachelorettes. You can date them, confess your love to them, propose to them, marry them, and even have children with them. You can also enjoy some romantic scenes with them, such as holding hands, hugging, kissing, etc.

        -

        Tips and Tricks for Game Rune Factory 5

        -

        Rune Factory 5 is a game that has a lot of depth and complexity, which can be overwhelming for some players. Here are some tips and tricks that can help you get started and make the most out of the game:

        -

        Choose Your Difficulty Level Wisely

        -

        Rune Factory 5 has four difficulty levels: Easy, Normal, Hard, and Hell. You can choose your difficulty level at the beginning of the game or change it anytime from the options menu. The difficulty level affects the strength of the enemies, the amount of damage you take and deal, the drop rate of items, the growth rate of crops, etc. You should choose a difficulty level that matches your skill level and preference. If you want a casual and relaxing experience, you should choose Easy or Normal. If you want a challenging and rewarding experience, you should choose Hard or Hell.

        -

        Manage Your Stamina and Health

        -

        Rune Factory 5 has two important gauges that you need to pay attention to: Stamina (SP) and Health (HP). Stamina is used for performing actions such as farming, fighting, crafting, etc. Health is used for surviving damage from enemies or hazards. If either of these gauges reaches zero, you will faint and lose some money and items. To avoid this, you should manage your stamina and health by using items such as food or potions; resting at your bed or a hot spring; healing at a clinic or a church; etc. You should also pay attention to the time and weather in the game, as they affect your stamina and health as well.

        -

        Upgrade Your Equipment and Skills

        -

        Rune Factory 5 has a lot of equipment and skills that you can use to improve your performance in the game. You can upgrade your equipment by crafting new ones or forging existing ones using materials that you find or buy in the game. You can upgrade your skills by using them frequently or by reading books that teach you new skills or enhance existing ones. You should upgrade your equipment and skills regularly to keep up with the increasing difficulty of the game.

        -

        Participate in Festivals and Events

        -

Rune Factory 5 has many festivals and events that take place throughout the in-game year. They are not only fun and festive but also beneficial to your progress: they let you earn money, items, friendship points, affection points, and more, and they include various mini-games and competitions that test your skill and luck. Participate in as many as you can, as they will make your game more enjoyable and rewarding.

        -

        Conclusion

        -

Rune Factory 5 is a game that has something for everyone. Whether you like farming, fighting, exploring, socializing, or romancing, you will find it here. You can download Rune Factory 5 for Nintendo Switch or Windows, immerse yourself in a rich and colorful fantasy world, customize your experience through your choice of difficulty level, equipment, skills, allies, and partner, and keep discovering new things at festivals and events. It is a game that will keep you entertained for hours. So what are you waiting for? Download Rune Factory 5 today and start your adventure!

        -

        FAQs

        -

        Here are some frequently asked questions about Rune Factory 5:

        -
          -
1. How long is Rune Factory 5?

          Rune Factory 5 is a game that has no definitive end. You can play it as long as you want and do whatever you want. However, if you want to complete the main story, it will take you about 40 to 50 hours. If you want to complete all the side quests, romance all the characters, and unlock all the content, it will take you about 100 to 150 hours.

          -
2. How many characters can you romance in Rune Factory 5?

          Rune Factory 5 has 12 romanceable characters: six bachelors and six bachelorettes. You can date any of them regardless of your gender. You can also marry one of them and have children with them. The bachelors are Lucas, Fuuka, Ludmilla, Murakumo, Reinhard, and Zad. The bachelorettes are Scarlett, Beatrice, Lucy, Martin, Misasagi, and Priscilla.

          -
3. How do you unlock new areas in Rune Factory 5?

          Rune Factory 5 has a large world map that consists of different areas such as forests, mountains, lakes, caves, etc. You can unlock new areas by progressing through the main story or by completing certain quests or events. Some areas require you to have certain items or skills to access them. For example, you need a hammer to break rocks that block your path; you need a fishing rod to cross water; you need a flying monster to reach high places; etc.

          -
4. How do you get money in Rune Factory 5?

          Rune Factory 5 has a currency called G (gold), which you can use to buy items, equipment, upgrades, etc. You can get money by selling crops, animal products, monster products, fish, minerals, etc. You can also get money by completing quests or winning competitions. You can also get money by finding treasure chests or hidden items in the world.

          -
5. How do you save your game in Rune Factory 5?

          Rune Factory 5 has an auto-save feature that saves your game every time you enter or exit a building or an area. You can also manually save your game by using the diary in your house or the save points in the dungeons. You can have up to three save files in the game.

          -
          -
          \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py deleted file mode 100644 index 65fdf56342e8b5b8e181914881025231684e1871..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/constrain.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Optional, TYPE_CHECKING - -from .jupyter import JupyterMixin -from .measure import Measurement - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType, RenderResult - - -class Constrain(JupyterMixin): - """Constrain the width of a renderable to a given number of characters. - - Args: - renderable (RenderableType): A renderable object. - width (int, optional): The maximum width (in characters) to render. Defaults to 80. - """ - - def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None: - self.renderable = renderable - self.width = width - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - if self.width is None: - yield self.renderable - else: - child_options = options.update_width(min(self.width, options.max_width)) - yield from console.render(self.renderable, child_options) - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - if self.width is not None: - options = options.update_width(self.width) - measurement = Measurement.get(console, options, self.renderable) - return measurement diff --git a/spaces/tobiascz/demotime/pytorch_grad_cam/ablation_layer.py b/spaces/tobiascz/demotime/pytorch_grad_cam/ablation_layer.py deleted file mode 100644 index 09470fb7276a96eda93b09c502dc10b4b0ff0e9f..0000000000000000000000000000000000000000 --- a/spaces/tobiascz/demotime/pytorch_grad_cam/ablation_layer.py +++ /dev/null @@ -1,124 +0,0 @@ -import torch -from collections import OrderedDict -import numpy as np -from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection - - -class AblationLayer(torch.nn.Module): - def __init__(self): - super(AblationLayer, self).__init__() - - def objectiveness_mask_from_svd(self, activations, threshold=0.01): - """ Experimental method to get a binary mask to compare if the activation is worth ablating. - The idea is to apply the EigenCAM method by doing PCA on the activations. - Then we create a binary mask by comparing to a low threshold. - Areas that are masked out, are probably not interesting anyway. - """ - - projection = get_2d_projection(activations[None, :])[0, :] - projection = np.abs(projection) - projection = projection - projection.min() - projection = projection / projection.max() - projection = projection > threshold - return projection - - def activations_to_be_ablated(self, activations, ratio_channels_to_ablate=1.0): - """ Experimental method to get a binary mask to compare if the activation is worth ablating. - Create a binary CAM mask with objectiveness_mask_from_svd. - Score each Activation channel, by seeing how much of its values are inside the mask. - Then keep the top channels. 
- - """ - if ratio_channels_to_ablate == 1.0: - self.indices = np.int32(range(activations.shape[0])) - return self.indices - - projection = self.objectiveness_mask_from_svd(activations) - - scores = [] - for channel in activations: - normalized = np.abs(channel) - normalized = normalized - normalized.min() - normalized = normalized / np.max(normalized) - score = (projection*normalized).sum() / normalized.sum() - scores.append(score) - scores = np.float32(scores) - - indices = list(np.argsort(scores)) - high_score_indices = indices[::-1][: int(len(indices) * ratio_channels_to_ablate)] - low_score_indices = indices[: int(len(indices) * ratio_channels_to_ablate)] - self.indices = np.int32(high_score_indices + low_score_indices) - return self.indices - - def set_next_batch(self, input_batch_index, activations, num_channels_to_ablate): - """ This creates the next batch of activations from the layer. - Just take corresponding batch member from activations, and repeat it num_channels_to_ablate times. - """ - self.activations = activations[input_batch_index, :, :, :].clone().unsqueeze(0).repeat(num_channels_to_ablate, 1, 1, 1) - - def __call__(self, x): - output = self.activations - for i in range(output.size(0)): - # Commonly the minimum activation will be 0, - # And then it makes sense to zero it out. - # However depending on the architecture, - # If the values can be negative, we use very negative values - # to perform the ablation, deviating from the paper. - if torch.min(output) == 0: - output[i, self.indices[i], :] = 0 - else: - ABLATION_VALUE = 1e7 - output[i, self.indices[i], :] = torch.min( - output) - ABLATION_VALUE - - return output - - -class AblationLayerVit(AblationLayer): - def __init__(self): - super(AblationLayerVit, self).__init__() - - def __call__(self, x): - output = self.activations - output = output.transpose(1, 2) - for i in range(output.size(0)): - - # Commonly the minimum activation will be 0, - # And then it makes sense to zero it out. - # However depending on the architecture, - # If the values can be negative, we use very negative values - # to perform the ablation, deviating from the paper. - if torch.min(output) == 0: - output[i, self.indices[i], :] = 0 - else: - ABLATION_VALUE = 1e7 - output[i, self.indices[i], :] = torch.min( - output) - ABLATION_VALUE - - output = output.transpose(2, 1) - - return output - - -class AblationLayerFasterRCNN(AblationLayer): - def __init__(self): - super(AblationLayerFasterRCNN, self).__init__() - - def set_next_batch(self, input_batch_index, activations, num_channels_to_ablate): - """ Extract the next batch member from activations, - and repeat it num_channels_to_ablate times. 
- """ - self.activations = OrderedDict() - for key, value in activations.items(): - fpn_activation = value[input_batch_index, :, :, :].clone().unsqueeze(0) - self.activations[key] = fpn_activation.repeat(num_channels_to_ablate, 1, 1, 1) - - def __call__(self, x): - result = self.activations - layers = {0: '0', 1: '1', 2: '2', 3: '3', 4: 'pool'} - num_channels_to_ablate = result['pool'].size(0) - for i in range(num_channels_to_ablate): - pyramid_layer = int(self.indices[i]/256) - index_in_pyramid_layer = int(self.indices[i] % 256) - result[layers[pyramid_layer]][i, index_in_pyramid_layer, :, :] = -1000 - return result diff --git a/spaces/tomofi/MMOCR/mmocr/models/__init__.py b/spaces/tomofi/MMOCR/mmocr/models/__init__.py deleted file mode 100644 index e0c7bb8903fb1c163d5708b0df87907b8e7291bc..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from . import common, kie, textdet, textrecog -from .builder import (BACKBONES, CONVERTORS, DECODERS, DETECTORS, ENCODERS, - HEADS, LOSSES, NECKS, PREPROCESSOR, build_backbone, - build_convertor, build_decoder, build_detector, - build_encoder, build_loss, build_preprocessor) -from .common import * # NOQA -from .kie import * # NOQA -from .ner import * # NOQA -from .textdet import * # NOQA -from .textrecog import * # NOQA - -__all__ = [ - 'BACKBONES', 'DETECTORS', 'HEADS', 'LOSSES', 'NECKS', 'build_backbone', - 'build_detector', 'build_loss', 'CONVERTORS', 'ENCODERS', 'DECODERS', - 'PREPROCESSOR', 'build_convertor', 'build_encoder', 'build_decoder', - 'build_preprocessor' -] -__all__ += common.__all__ + kie.__all__ + textdet.__all__ + textrecog.__all__ diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/backbones/shallow_cnn.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/backbones/shallow_cnn.py deleted file mode 100644 index f2cd89a6bde472fa83cee6b0876d4a89eaf79958..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/backbones/shallow_cnn.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from mmocr.models.builder import BACKBONES - - -@BACKBONES.register_module() -class ShallowCNN(BaseModule): - """Implement Shallow CNN block for SATRN. - - SATRN: `On Recognizing Texts of Arbitrary Shapes with 2D Self-Attention - `_. - - Args: - base_channels (int): Number of channels of input image tensor - :math:`D_i`. - hidden_dim (int): Size of hidden layers of the model :math:`D_m`. - init_cfg (dict or list[dict], optional): Initialization configs. - """ - - def __init__(self, - input_channels=1, - hidden_dim=512, - init_cfg=[ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Uniform', layer='BatchNorm2d') - ]): - super().__init__(init_cfg=init_cfg) - assert isinstance(input_channels, int) - assert isinstance(hidden_dim, int) - - self.conv1 = ConvModule( - input_channels, - hidden_dim // 2, - kernel_size=3, - stride=1, - padding=1, - bias=False, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU')) - self.conv2 = ConvModule( - hidden_dim // 2, - hidden_dim, - kernel_size=3, - stride=1, - padding=1, - bias=False, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU')) - self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) - - def forward(self, x): - """ - Args: - x (Tensor): Input image feature :math:`(N, D_i, H, W)`. 
- - Returns: - Tensor: A tensor of shape :math:`(N, D_m, H/4, W/4)`. - """ - - x = self.conv1(x) - x = self.pool(x) - - x = self.conv2(x) - x = self.pool(x) - - return x diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/segmentation/inference.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/segmentation/inference.py deleted file mode 100644 index 8f638b04bb77a9163921f0abee31c5888d036cf7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/segmentation/inference.py +++ /dev/null @@ -1,378 +0,0 @@ -#!/usr/bin/env python3 -import numpy as np -import torch -import cv2 -import pyclipper -from shapely.geometry import Polygon - -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, cat_boxlist_gt -from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes -from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask -import random - -import time - - -class SEGPostProcessor(torch.nn.Module): - """ - Performs post-processing on the outputs of the RPN boxes, before feeding the - proposals to the heads - """ - - def __init__( - self, - top_n, - binary_thresh, - box_thresh, - min_size, - cfg, - ): - """ - Arguments: - top_n (int) - binary_thresh (float) - box_thresh (float) - min_size (int) - """ - super(SEGPostProcessor, self).__init__() - self.top_n = top_n - self.binary_thresh = binary_thresh - self.box_thresh = box_thresh - self.min_size = min_size - self.cfg = cfg - - def add_gt_proposals(self, proposals, targets): - """ - Arguments: - proposals: list[BoxList] - targets: list[BoxList] - """ - # Get the device we're operating on - # device = proposals[0].bbox. - if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE: - gt_boxes = [target.copy_with_fields(['masks']) for target in targets] - else: - gt_boxes = [target.copy_with_fields([]) for target in targets] - # later cat of bbox requires all fields to be present for all bbox - # so we need to add a dummy for objectness that's missing - # for gt_box in gt_boxes: - # gt_box.add_field("objectness", torch.ones(len(gt_box), device=device)) - proposals = [ - cat_boxlist_gt([proposal, gt_box]) - for proposal, gt_box in zip(proposals, gt_boxes) - ] - - return proposals - - def aug_tensor_proposals(self, boxes): - # boxes: N * 4 - boxes = boxes.float() - N = boxes.shape[0] - device = boxes.device - aug_boxes = torch.zeros((4, N, 4), device=device) - aug_boxes[0, :, :] = boxes.clone() - xmin, ymin, xmax, ymax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] - x_center = (xmin + xmax) / 2. - y_center = (ymin + ymax) / 2. - width = xmax - xmin - height = ymax - ymin - for i in range(3): - choice = random.random() - if choice < 0.5: - # shrink or expand - ratio = (torch.randn((N,), device=device) * 3 + 1) / 2. - height = height * ratio - ratio = (torch.randn((N,), device=device) * 3 + 1) / 2. 
- width = width * ratio - else: - move_x = width * (torch.randn((N,), device=device) * 4 - 2) - move_y = height * (torch.randn((N,), device=device) * 4 - 2) - x_center += move_x - y_center += move_y - boxes[:, 0] = x_center - width / 2 - boxes[:, 2] = x_center + width / 2 - boxes[:, 1] = y_center - height / 2 - boxes[:, 3] = y_center + height / 2 - aug_boxes[i+1, :, :] = boxes.clone() - return aug_boxes.reshape((-1, 4)) - - def forward_for_single_feature_map(self, pred, image_shapes): - """ - Arguments: - pred: tensor of size N, 1, H, W - """ - device = pred.device - # torch.cuda.synchronize() - # start_time = time.time() - bitmap = self.binarize(pred) - # torch.cuda.synchronize() - # end_time = time.time() - # print('binarize time:', end_time - start_time) - N, height, width = pred.shape[0], pred.shape[2], pred.shape[3] - # torch.cuda.synchronize() - # start_time = time.time() - bitmap_numpy = bitmap.cpu().numpy() # The first channel - pred_map_numpy = pred.cpu().numpy() - # torch.cuda.synchronize() - # end_time = time.time() - # print('gpu2numpy time:', end_time - start_time) - boxes_batch = [] - rotated_boxes_batch = [] - polygons_batch = [] - scores_batch = [] - # torch.cuda.synchronize() - # start_time = time.time() - for batch_index in range(N): - image_shape = image_shapes[batch_index] - boxes, scores, rotated_boxes, polygons = self.boxes_from_bitmap( - pred_map_numpy[batch_index], - bitmap_numpy[batch_index], width, height) - boxes = boxes.to(device) - if self.training and self.cfg.MODEL.SEG.AUG_PROPOSALS: - boxes = self.aug_tensor_proposals(boxes) - if boxes.shape[0] > self.top_n: - boxes = boxes[:self.top_n, :] - # _, top_index = scores.topk(self.top_n, 0, sorted=False) - # boxes = boxes[top_index, :] - # scores = scores[top_index] - # boxlist = BoxList(boxes, (width, height), mode="xyxy") - boxlist = BoxList(boxes, (image_shape[1], image_shape[0]), mode="xyxy") - if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE: - masks = SegmentationMask(polygons, (image_shape[1], image_shape[0])) - boxlist.add_field('masks', masks) - boxlist = boxlist.clip_to_image(remove_empty=False) - # boxlist = remove_small_boxes(boxlist, self.min_size) - boxes_batch.append(boxlist) - rotated_boxes_batch.append(rotated_boxes) - polygons_batch.append(polygons) - scores_batch.append(scores) - # torch.cuda.synchronize() - # end_time = time.time() - # print('loop time:', end_time - start_time) - return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch - - def forward(self, seg_output, image_shapes, targets=None): - """ - Arguments: - seg_output: list[tensor] - - Returns: - boxlists (list[BoxList]): bounding boxes - """ - sampled_boxes = [] - boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch = self.forward_for_single_feature_map(seg_output, image_shapes) - if not self.training: - return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch - sampled_boxes.append(boxes_batch) - - boxlists = list(zip(*sampled_boxes)) - boxlists = [cat_boxlist(boxlist) for boxlist in boxlists] - - # append ground-truth bboxes to proposals - if self.training and targets is not None: - boxlists = self.add_gt_proposals(boxlists, targets) - return boxlists - - # def select_over_all_levels(self, boxlists): - # num_images = len(boxlists) - # # different behavior during training and during testing: - # # during training, post_nms_top_n is over *all* the proposals combined, while - # # during testing, it is over the 
proposals for each image - # # TODO resolve this difference and make it consistent. It should be per image, - # # and not per batch - # if self.training: - # objectness = torch.cat( - # [boxlist.get_field("objectness") for boxlist in boxlists], dim=0 - # ) - # box_sizes = [len(boxlist) for boxlist in boxlists] - # post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) - # _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True) - # inds_mask = torch.zeros_like(objectness, dtype=torch.uint8) - # inds_mask[inds_sorted] = 1 - # inds_mask = inds_mask.split(box_sizes) - # for i in range(num_images): - # boxlists[i] = boxlists[i][inds_mask[i]] - # else: - # for i in range(num_images): - # objectness = boxlists[i].get_field("objectness") - # post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness)) - # _, inds_sorted = torch.topk( - # objectness, post_nms_top_n, dim=0, sorted=True - # ) - # boxlists[i] = boxlists[i][inds_sorted] - # return boxlists - - def binarize(self, pred): - if self.cfg.MODEL.SEG.USE_MULTIPLE_THRESH: - binary_maps = [] - for thre in self.cfg.MODEL.SEG.MULTIPLE_THRESH: - binary_map = pred > thre - binary_maps.append(binary_map) - return torch.cat(binary_maps, dim=1) - else: - return pred > self.binary_thresh - - def boxes_from_bitmap(self, pred, bitmap, dest_width, dest_height): - """ - _bitmap: single map with shape (1, H, W), - whose values are binarized as {0, 1} - """ - # assert _bitmap.size(0) == 1 - # bitmap = _bitmap[0] # The first channel - pred = pred[0] - height, width = bitmap.shape[1], bitmap.shape[2] - boxes = [] - scores = [] - rotated_boxes = [] - polygons = [] - contours_all = [] - for i in range(bitmap.shape[0]): - try: - _, contours, _ = cv2.findContours( - (bitmap[i] * 255).astype(np.uint8), - cv2.RETR_LIST, - cv2.CHAIN_APPROX_NONE, - ) - except BaseException: - contours, _ = cv2.findContours( - (bitmap[i] * 255).astype(np.uint8), - cv2.RETR_LIST, - cv2.CHAIN_APPROX_NONE, - ) - contours_all.extend(contours) - for contour in contours_all: - epsilon = 0.01 * cv2.arcLength(contour, True) - approx = cv2.approxPolyDP(contour, epsilon, True) - polygon = approx.reshape((-1, 2)) - points, sside = self.get_mini_boxes(contour) - if sside < self.min_size: - continue - points = np.array(points) - score = self.box_score_fast(pred, points) - if not self.training and self.box_thresh > score: - continue - if polygon.shape[0] > 2: - polygon = self.unclip(polygon, expand_ratio=self.cfg.MODEL.SEG.EXPAND_RATIO) - if len(polygon) > 1: - continue - else: - continue - # polygon = polygon.reshape(-1, 2) - polygon = polygon.reshape(-1) - box = self.unclip(points, expand_ratio=self.cfg.MODEL.SEG.BOX_EXPAND_RATIO).reshape(-1, 2) - box = np.array(box) - box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width) - box[:, 1] = np.clip( - np.round(box[:, 1] / height * dest_height), 0, dest_height - ) - min_x, min_y = min(box[:, 0]), min(box[:, 1]) - max_x, max_y = max(box[:, 0]), max(box[:, 1]) - horizontal_box = torch.from_numpy(np.array([min_x, min_y, max_x, max_y])) - boxes.append(horizontal_box) - scores.append(score) - rotated_box, _ = self.get_mini_boxes(box.reshape(-1, 1, 2)) - rotated_box = np.array(rotated_box) - rotated_boxes.append(rotated_box) - polygons.append([polygon]) - if len(boxes) == 0: - boxes = [torch.from_numpy(np.array([0, 0, 0, 0]))] - scores = [0.] 
- - boxes = torch.stack(boxes) - scores = torch.from_numpy(np.array(scores)) - return boxes, scores, rotated_boxes, polygons - - def aug_proposals(self, box): - xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3] - x_center = int((xmin + xmax) / 2.) - y_center = int((ymin + ymax) / 2.) - width = xmax - xmin - height = ymax - ymin - choice = random.random() - if choice < 0.5: - # shrink or expand - ratio = (random.random() * 3 + 1) / 2. - height = height * ratio - ratio = (random.random() * 3 + 1) / 2. - width = width * ratio - else: - move_x = width * (random.random() * 4 - 2) - move_y = height * (random.random() * 4 - 2) - x_center += move_x - y_center += move_y - xmin = int(x_center - width / 2) - xmax = int(x_center + width / 2) - ymin = int(y_center - height / 2) - ymax = int(y_center + height / 2) - return [xmin, ymin, xmax, ymax] - - def unclip(self, box, expand_ratio=1.5): - poly = Polygon(box) - distance = poly.area * expand_ratio / poly.length - offset = pyclipper.PyclipperOffset() - offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) - expanded = np.array(offset.Execute(distance)) - return expanded - - def get_mini_boxes(self, contour): - bounding_box = cv2.minAreaRect(contour) - points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0]) - - index_1, index_2, index_3, index_4 = 0, 1, 2, 3 - if points[1][1] > points[0][1]: - index_1 = 0 - index_4 = 1 - else: - index_1 = 1 - index_4 = 0 - if points[3][1] > points[2][1]: - index_2 = 2 - index_3 = 3 - else: - index_2 = 3 - index_3 = 2 - - box = [points[index_1], points[index_2], points[index_3], points[index_4]] - return box, min(bounding_box[1]) - - def box_score(self, bitmap, box): - """ - naive version of box score computation, - only for helping principle understand. 
- """ - mask = np.zeros_like(bitmap, dtype=np.uint8) - cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1) - return cv2.mean(bitmap, mask)[0] - - def box_score_fast(self, bitmap, _box): - h, w = bitmap.shape[:2] - box = _box.copy() - xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int), 0, w - 1) - xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int), 0, w - 1) - ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int), 0, h - 1) - ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int), 0, h - 1) - - mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) - box[:, 0] = box[:, 0] - xmin - box[:, 1] = box[:, 1] - ymin - cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1) - return cv2.mean(bitmap[ymin : ymax + 1, xmin : xmax + 1], mask)[0] - - -def make_seg_postprocessor(config, is_train): - top_n = config.MODEL.SEG.TOP_N_TRAIN - if not is_train: - top_n = config.MODEL.SEG.TOP_N_TEST - - binary_thresh = config.MODEL.SEG.BINARY_THRESH - box_thresh = config.MODEL.SEG.BOX_THRESH - min_size = config.MODEL.SEG.MIN_SIZE - box_selector = SEGPostProcessor( - top_n=top_n, - binary_thresh=binary_thresh, - box_thresh=box_thresh, - min_size=min_size, - cfg = config - ) - return box_selector diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py deleted file mode 100644 index ed3a96c7dec922fcc73a3ab1446ffdf4a756c152..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py +++ /dev/null @@ -1,52 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(depth=101), - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/losses/ghm_loss.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/losses/ghm_loss.py deleted file mode 100644 index 8969a23fd98bb746415f96ac5e4ad9e37ba3af52..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/losses/ghm_loss.py +++ /dev/null @@ -1,172 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import 
LOSSES - - -def _expand_onehot_labels(labels, label_weights, label_channels): - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - inds = torch.nonzero( - (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() - if inds.numel() > 0: - bin_labels[inds, labels[inds]] = 1 - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) - return bin_labels, bin_label_weights - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMC(nn.Module): - """GHM Classification Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - use_sigmoid (bool): Can only be true for BCE based loss now. - loss_weight (float): The weight of the total GHM-C loss. - """ - - def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): - super(GHMC, self).__init__() - self.bins = bins - self.momentum = momentum - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] += 1e-6 - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.use_sigmoid = use_sigmoid - if not self.use_sigmoid: - raise NotImplementedError - self.loss_weight = loss_weight - - def forward(self, pred, target, label_weight, *args, **kwargs): - """Calculate the GHM-C loss. - - Args: - pred (float tensor of size [batch_num, class_num]): - The direct prediction of classification fc layer. - target (float tensor of size [batch_num, class_num]): - Binary class target for each sample. - label_weight (float tensor of size [batch_num, class_num]): - the value is 1 if the sample is valid and 0 if ignored. - Returns: - The gradient harmonized loss. - """ - # the target should be binary class label - if pred.dim() != target.dim(): - target, label_weight = _expand_onehot_labels( - target, label_weight, pred.size(-1)) - target, label_weight = target.float(), label_weight.float() - edges = self.edges - mmt = self.momentum - weights = torch.zeros_like(pred) - - # gradient length - g = torch.abs(pred.sigmoid().detach() - target) - - valid = label_weight > 0 - tot = max(valid.float().sum().item(), 1.0) - n = 0 # n valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - n += 1 - if n > 0: - weights = weights / n - - loss = F.binary_cross_entropy_with_logits( - pred, target, weights, reduction='sum') / tot - return loss * self.loss_weight - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMR(nn.Module): - """GHM Regression Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - mu (float): The parameter for the Authentic Smooth L1 loss. - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - loss_weight (float): The weight of the total GHM-R loss. 
- """ - - def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): - super(GHMR, self).__init__() - self.mu = mu - self.bins = bins - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] = 1e3 - self.momentum = momentum - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.loss_weight = loss_weight - - # TODO: support reduction parameter - def forward(self, pred, target, label_weight, avg_factor=None): - """Calculate the GHM-R loss. - - Args: - pred (float tensor of size [batch_num, 4 (* class_num)]): - The prediction of box regression layer. Channel number can be 4 - or 4 * class_num depending on whether it is class-agnostic. - target (float tensor of size [batch_num, 4 (* class_num)]): - The target regression values with the same size of pred. - label_weight (float tensor of size [batch_num, 4 (* class_num)]): - The weight of each sample, 0 if ignored. - Returns: - The gradient harmonized loss. - """ - mu = self.mu - edges = self.edges - mmt = self.momentum - - # ASL1 loss - diff = pred - target - loss = torch.sqrt(diff * diff + mu * mu) - mu - - # gradient length - g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() - weights = torch.zeros_like(g) - - valid = label_weight > 0 - tot = max(label_weight.float().sum().item(), 1.0) - n = 0 # n: valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - n += 1 - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - if n > 0: - weights /= n - - loss = loss * weights - loss = loss.sum() / tot - return loss * self.loss_weight diff --git a/spaces/tomofi/NDLOCR/src/text_recognition/create_xmldataset.py b/spaces/tomofi/NDLOCR/src/text_recognition/create_xmldataset.py deleted file mode 100644 index 6cb0e9ab5cd41badd48e98410d7a8edd31e8f7b8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/text_recognition/create_xmldataset.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2022, National Diet Library, Japan -# -# This software is released under the CC BY 4.0. 
-# https://creativecommons.org/licenses/by/4.0/ - - -import pathlib -import json -import lmdb - -from xmlparser import XMLRawDataset, ListRawDataset - -import argparse - - -class Env: - def __init__(self, output_path, interval_writeCache=1000): - self.output_path = output_path - self.env = lmdb.open(str(output_path), map_size=1099511627776) - self.cache = dict() - self.n = 0 - self.interval = interval_writeCache - - def finish_line(self): - self.n += 1 - if self.n % 1000 == 0: - self.writeCache() - - def writeCache(self): - with self.env.begin(write=True) as txn: - for k, v in self.cache.items(): - txn.put(k, v) - self.cache = {} - print(f'Written {self.n} lines @ {self.output_path}') - - -def createDataset(input_path, output_path, db_type='xml', dry_run=False): - p = pathlib.Path(output_path) - p.mkdir(parents=True, exist_ok=True) - - if db_type == 'xml': - generator = XMLRawDataset.from_list(input_path, image_type=XMLRawDataset.IMAGE_TYPE_ENCODED) - elif db_type == 'list': - generator = ListRawDataset(input_path, image_type=XMLRawDataset.IMAGE_TYPE_ENCODED) - if dry_run: - return - - # generate database - env = Env(output_path[0]) - env.cache['dbtype'.encode()] = 'xml'.encode() - - for il, (g, line) in enumerate(generator): - env.cache[f'{env.n:09d}-direction'.encode()] = line.get('direction').encode() - env.cache[f'{env.n:09d}-label'.encode()] = line.get('label').encode() - env.cache[f'{env.n:09d}-cattrs'.encode()] = json.dumps(line.get('cattrs')).encode() - env.cache[f'{env.n:09d}-image'.encode()] = g - env.finish_line() - - env.cache['n_line'.encode()] = str(env.n).encode() - env.writeCache() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--input_path', nargs='+', required=True) - parser.add_argument('--output_path', required=True) - parser.add_argument('--db_type', default='xml', choices=['xml', 'list']) - parser.add_argument('--dry-run', action='store_true') - opt = parser.parse_args() - createDataset(opt.input_path, opt.output_path, opt.db_type, dry_run=opt.dry_run) diff --git a/spaces/tonyassi/video-face-swap/DeepFakeAI/utilities.py b/spaces/tonyassi/video-face-swap/DeepFakeAI/utilities.py deleted file mode 100644 index dd33cf157f684dc1bad324bca4d9326b8e3f82f2..0000000000000000000000000000000000000000 --- a/spaces/tonyassi/video-face-swap/DeepFakeAI/utilities.py +++ /dev/null @@ -1,190 +0,0 @@ -import glob -import mimetypes -import os -import platform -import shutil -import ssl -import subprocess -import tempfile -import urllib -from pathlib import Path -from typing import List, Optional - -import onnxruntime -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import wording - -TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'DeepFakeAI') -TEMP_OUTPUT_NAME = 'temp.mp4' - -# monkey patch ssl -if platform.system().lower() == 'darwin': - ssl._create_default_https_context = ssl._create_unverified_context - - -def run_ffmpeg(args : List[str]) -> bool: - commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] - commands.extend(args) - try: - subprocess.check_output(commands, stderr = subprocess.STDOUT) - return True - except subprocess.CalledProcessError: - return False - - -def detect_fps(target_path : str) -> Optional[float]: - commands = [ 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers = 1:nokey = 1', target_path ] - output = subprocess.check_output(commands).decode().strip().split('/') - try: - numerator, denominator = map(int, 
output) - return numerator / denominator - except (ValueError, ZeroDivisionError): - return None - - -def extract_frames(target_path : str, fps : float) -> bool: - temp_directory_path = get_temp_directory_path(target_path) - temp_frame_quality = round(31 - (DeepFakeAI.globals.temp_frame_quality * 0.31)) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24', ] - if trim_frame_start is not None and trim_frame_end is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - elif trim_frame_start is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) - elif trim_frame_end is not None: - commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - else: - commands.extend([ '-vf', 'fps=' + str(fps) ]) - commands.extend([os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format)]) - return run_ffmpeg(commands) - - -def create_video(target_path : str, fps : float) -> bool: - temp_output_path = get_temp_output_path(target_path) - temp_directory_path = get_temp_directory_path(target_path) - output_video_quality = round(51 - (DeepFakeAI.globals.output_video_quality * 0.5)) - commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format), '-c:v', DeepFakeAI.globals.output_video_encoder ] - if DeepFakeAI.globals.output_video_encoder in [ 'libx264', 'libx265', 'libvpx' ]: - commands.extend([ '-crf', str(output_video_quality) ]) - if DeepFakeAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: - commands.extend([ '-cq', str(output_video_quality) ]) - commands.extend([ '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625', '-y', temp_output_path ]) - return run_ffmpeg(commands) - - -def restore_audio(target_path : str, output_path : str) -> None: - fps = detect_fps(target_path) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - temp_output_path = get_temp_output_path(target_path) - commands = [ '-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path ] - if trim_frame_start is None and trim_frame_end is None: - commands.extend([ '-c:a', 'copy' ]) - else: - if trim_frame_start is not None: - start_time = trim_frame_start / fps - commands.extend([ '-ss', str(start_time) ]) - else: - commands.extend([ '-ss', '0' ]) - if trim_frame_end is not None: - end_time = trim_frame_end / fps - commands.extend([ '-to', str(end_time) ]) - commands.extend([ '-c:a', 'aac' ]) - commands.extend([ '-map', '0:v:0', '-map', '1:a:0', '-y', output_path ]) - done = run_ffmpeg(commands) - if not done: - move_temp(target_path, output_path) - - -def get_temp_frame_paths(target_path : str) -> List[str]: - temp_directory_path = get_temp_directory_path(target_path) - return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' 
+ DeepFakeAI.globals.temp_frame_format))) - - -def get_temp_directory_path(target_path : str) -> str: - target_name, _ = os.path.splitext(os.path.basename(target_path)) - return os.path.join(TEMP_DIRECTORY_PATH, target_name) - - -def get_temp_output_path(target_path : str) -> str: - temp_directory_path = get_temp_directory_path(target_path) - return os.path.join(temp_directory_path, TEMP_OUTPUT_NAME) - - -def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]: - if source_path and target_path and output_path: - source_name, _ = os.path.splitext(os.path.basename(source_path)) - target_name, target_extension = os.path.splitext(os.path.basename(target_path)) - if os.path.isdir(output_path): - return os.path.join(output_path, source_name + '-' + target_name + target_extension) - return output_path - - -def create_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - Path(temp_directory_path).mkdir(parents = True, exist_ok = True) - - -def move_temp(target_path : str, output_path : str) -> None: - temp_output_path = get_temp_output_path(target_path) - if os.path.isfile(temp_output_path): - if os.path.isfile(output_path): - os.remove(output_path) - shutil.move(temp_output_path, output_path) - - -def clear_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - parent_directory_path = os.path.dirname(temp_directory_path) - if not DeepFakeAI.globals.keep_temp and os.path.isdir(temp_directory_path): - shutil.rmtree(temp_directory_path) - if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): - os.rmdir(parent_directory_path) - - -def is_image(image_path : str) -> bool: - if image_path and os.path.isfile(image_path): - mimetype, _ = mimetypes.guess_type(image_path) - return bool(mimetype and mimetype.startswith('image/')) - return False - - -def is_video(video_path : str) -> bool: - if video_path and os.path.isfile(video_path): - mimetype, _ = mimetypes.guess_type(video_path) - return bool(mimetype and mimetype.startswith('video/')) - return False - - -def conditional_download(download_directory_path : str, urls : List[str]) -> None: - if not os.path.exists(download_directory_path): - os.makedirs(download_directory_path) - for url in urls: - download_file_path = os.path.join(download_directory_path, os.path.basename(url)) - if not os.path.exists(download_file_path): - request = urllib.request.urlopen(url) # type: ignore[attr-defined] - total = int(request.headers.get('Content-Length', 0)) - with tqdm(total = total, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024) as progress: - urllib.request.urlretrieve(url, download_file_path, reporthook = lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] - - -def resolve_relative_path(path : str) -> str: - return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) - - -def list_module_names(path : str) -> Optional[List[str]]: - if os.path.exists(path): - files = os.listdir(path) - return [Path(file).stem for file in files if not Path(file).stem.startswith('__')] - return None - - -def encode_execution_providers(execution_providers : List[str]) -> List[str]: - return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers] - - -def decode_execution_providers(execution_providers : List[str]) -> List[str]: - return [provider for provider, encoded_execution_provider in 
zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] diff --git a/spaces/trysem/image-matting-app/ppmatting/models/gca.py b/spaces/trysem/image-matting-app/ppmatting/models/gca.py deleted file mode 100644 index 369a913570682f85ea696beaf3b78b7c2ec88141..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/models/gca.py +++ /dev/null @@ -1,305 +0,0 @@ -# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The gca code was heavily based on https://github.com/Yaoyi-Li/GCA-Matting -# and https://github.com/open-mmlab/mmediting - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -from paddleseg.models import layers -from paddleseg import utils -from paddleseg.cvlibs import manager, param_init - -from ppmatting.models.layers import GuidedCxtAtten - - -@manager.MODELS.add_component -class GCABaseline(nn.Layer): - def __init__(self, backbone, pretrained=None): - super().__init__() - self.encoder = backbone - self.decoder = ResShortCut_D_Dec([2, 3, 3, 2]) - - def forward(self, inputs): - - x = paddle.concat([inputs['img'], inputs['trimap'] / 255], axis=1) - embedding, mid_fea = self.encoder(x) - alpha_pred = self.decoder(embedding, mid_fea) - - if self.training: - logit_dict = {'alpha_pred': alpha_pred, } - loss_dict = {} - alpha_gt = inputs['alpha'] - loss_dict["alpha"] = F.l1_loss(alpha_pred, alpha_gt) - loss_dict["all"] = loss_dict["alpha"] - return logit_dict, loss_dict - - return alpha_pred - - -@manager.MODELS.add_component -class GCA(GCABaseline): - def __init__(self, backbone, pretrained=None): - super().__init__(backbone, pretrained) - self.decoder = ResGuidedCxtAtten_Dec([2, 3, 3, 2]) - - -def conv5x5(in_planes, out_planes, stride=1, groups=1, dilation=1): - """5x5 convolution with padding""" - return nn.Conv2D( - in_planes, - out_planes, - kernel_size=5, - stride=stride, - padding=2, - groups=groups, - bias_attr=False, - dilation=dilation) - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2D( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias_attr=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2D( - in_planes, out_planes, kernel_size=1, stride=stride, bias_attr=False) - - -class BasicBlock(nn.Layer): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - upsample=None, - norm_layer=None, - large_kernel=False): - super().__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm - self.stride = stride - conv = conv5x5 if large_kernel else conv3x3 - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - if self.stride > 1: - self.conv1 = nn.utils.spectral_norm( - 
nn.Conv2DTranspose( - inplanes, - inplanes, - kernel_size=4, - stride=2, - padding=1, - bias_attr=False)) - else: - self.conv1 = nn.utils.spectral_norm(conv(inplanes, inplanes)) - self.bn1 = norm_layer(inplanes) - self.activation = nn.LeakyReLU(0.2) - self.conv2 = nn.utils.spectral_norm(conv(inplanes, planes)) - self.bn2 = norm_layer(planes) - self.upsample = upsample - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.activation(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.upsample is not None: - identity = self.upsample(x) - - out += identity - out = self.activation(out) - - return out - - -class ResNet_D_Dec(nn.Layer): - def __init__(self, - layers=[3, 4, 4, 2], - norm_layer=None, - large_kernel=False, - late_downsample=False): - super().__init__() - - if norm_layer is None: - norm_layer = nn.BatchNorm - self._norm_layer = norm_layer - self.large_kernel = large_kernel - self.kernel_size = 5 if self.large_kernel else 3 - - self.inplanes = 512 if layers[0] > 0 else 256 - self.late_downsample = late_downsample - self.midplanes = 64 if late_downsample else 32 - - self.conv1 = nn.utils.spectral_norm( - nn.Conv2DTranspose( - self.midplanes, - 32, - kernel_size=4, - stride=2, - padding=1, - bias_attr=False)) - self.bn1 = norm_layer(32) - self.leaky_relu = nn.LeakyReLU(0.2) - self.conv2 = nn.Conv2D( - 32, - 1, - kernel_size=self.kernel_size, - stride=1, - padding=self.kernel_size // 2) - self.upsample = nn.UpsamplingNearest2D(scale_factor=2) - self.tanh = nn.Tanh() - self.layer1 = self._make_layer(BasicBlock, 256, layers[0], stride=2) - self.layer2 = self._make_layer(BasicBlock, 128, layers[1], stride=2) - self.layer3 = self._make_layer(BasicBlock, 64, layers[2], stride=2) - self.layer4 = self._make_layer( - BasicBlock, self.midplanes, layers[3], stride=2) - - self.init_weight() - - def _make_layer(self, block, planes, blocks, stride=1): - if blocks == 0: - return nn.Sequential(nn.Identity()) - norm_layer = self._norm_layer - upsample = None - if stride != 1: - upsample = nn.Sequential( - nn.UpsamplingNearest2D(scale_factor=2), - nn.utils.spectral_norm( - conv1x1(self.inplanes, planes * block.expansion)), - norm_layer(planes * block.expansion), ) - elif self.inplanes != planes * block.expansion: - upsample = nn.Sequential( - nn.utils.spectral_norm( - conv1x1(self.inplanes, planes * block.expansion)), - norm_layer(planes * block.expansion), ) - - layers = [ - block(self.inplanes, planes, stride, upsample, norm_layer, - self.large_kernel) - ] - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block( - self.inplanes, - planes, - norm_layer=norm_layer, - large_kernel=self.large_kernel)) - - return nn.Sequential(*layers) - - def forward(self, x, mid_fea): - x = self.layer1(x) # N x 256 x 32 x 32 - print(x.shape) - x = self.layer2(x) # N x 128 x 64 x 64 - print(x.shape) - x = self.layer3(x) # N x 64 x 128 x 128 - print(x.shape) - x = self.layer4(x) # N x 32 x 256 x 256 - print(x.shape) - x = self.conv1(x) - x = self.bn1(x) - x = self.leaky_relu(x) - x = self.conv2(x) - - alpha = (self.tanh(x) + 1.0) / 2.0 - - return alpha - - def init_weight(self): - for layer in self.sublayers(): - if isinstance(layer, nn.Conv2D): - - if hasattr(layer, "weight_orig"): - param = layer.weight_orig - else: - param = layer.weight - param_init.xavier_uniform(param) - - elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): - param_init.constant_init(layer.weight, value=1.0) - param_init.constant_init(layer.bias, 
value=0.0) - - elif isinstance(layer, BasicBlock): - param_init.constant_init(layer.bn2.weight, value=0.0) - - -class ResShortCut_D_Dec(ResNet_D_Dec): - def __init__(self, - layers, - norm_layer=None, - large_kernel=False, - late_downsample=False): - super().__init__( - layers, norm_layer, large_kernel, late_downsample=late_downsample) - - def forward(self, x, mid_fea): - fea1, fea2, fea3, fea4, fea5 = mid_fea['shortcut'] - x = self.layer1(x) + fea5 - x = self.layer2(x) + fea4 - x = self.layer3(x) + fea3 - x = self.layer4(x) + fea2 - x = self.conv1(x) - x = self.bn1(x) - x = self.leaky_relu(x) + fea1 - x = self.conv2(x) - - alpha = (self.tanh(x) + 1.0) / 2.0 - - return alpha - - -class ResGuidedCxtAtten_Dec(ResNet_D_Dec): - def __init__(self, - layers, - norm_layer=None, - large_kernel=False, - late_downsample=False): - super().__init__( - layers, norm_layer, large_kernel, late_downsample=late_downsample) - self.gca = GuidedCxtAtten(128, 128) - - def forward(self, x, mid_fea): - fea1, fea2, fea3, fea4, fea5 = mid_fea['shortcut'] - im = mid_fea['image_fea'] - x = self.layer1(x) + fea5 # N x 256 x 32 x 32 - x = self.layer2(x) + fea4 # N x 128 x 64 x 64 - x = self.gca(im, x, mid_fea['unknown']) # contextual attention - x = self.layer3(x) + fea3 # N x 64 x 128 x 128 - x = self.layer4(x) + fea2 # N x 32 x 256 x 256 - x = self.conv1(x) - x = self.bn1(x) - x = self.leaky_relu(x) + fea1 - x = self.conv2(x) - - alpha = (self.tanh(x) + 1.0) / 2.0 - - return alpha diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/more_layers/criss_cross_attention.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/more_layers/criss_cross_attention.py deleted file mode 100644 index 2eff81273652d0f6b67615b027aa1992290dd2df..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/more_layers/criss_cross_attention.py +++ /dev/null @@ -1,62 +0,0 @@ -''' -code from https://github.com/Serge-weihao/CCNet-Pure-Pytorch/blob/master/networks/CC.py -only mod code style - -十字注意力模块 -''' - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -__all__ = ['CrissCrossAttention'] - - -class CrissCrossAttention(torch.jit.ScriptModule): - ''' - 十字注意力模块 - ''' - - def __init__(self, in_dim): - super().__init__() - self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1) - self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1) - self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) - self.gamma = nn.Parameter(torch.zeros(1), requires_grad=True) - - @torch.jit.script_method - def forward(self, x): - B, _, iH, iW = x.shape - - ninf_diag = torch.full([iH], fill_value=-torch.inf, device=x.device, dtype=x.dtype).diag(0) - - proj_query = self.query_conv(x) - proj_query_H = proj_query.permute(0, 3, 1, 2).reshape(B * iW, -1, iH).permute(0, 2, 1) - proj_query_W = proj_query.permute(0, 2, 1, 3).reshape(B * iH, -1, iW).permute(0, 2, 1) - proj_key = self.key_conv(x) - proj_key_H = proj_key.permute(0, 3, 1, 2).reshape(B * iW, -1, iH) - proj_key_W = proj_key.permute(0, 2, 1, 3).reshape(B * iH, -1, iW) - proj_value = self.value_conv(x) - proj_value_H = proj_value.permute(0, 3, 1, 2).reshape(B * iW, -1, iH) - proj_value_W = proj_value.permute(0, 2, 1, 3).reshape(B * iH, -1, iW) - energy_H = (torch.bmm(proj_query_H, proj_key_H) + ninf_diag[None,].repeat(B * iW, 1, 1)) \ - .reshape(B, iW, iH, iH).permute(0, 
2, 1, 3) - energy_W = torch.bmm(proj_query_W, proj_key_W).reshape(B, iH, iW, iW) - concate = F.softmax(torch.cat([energy_H, energy_W], 3), dim=3) - att_H = concate[:, :, :, 0:iH].permute(0, 2, 1, 3).reshape(B * iW, iH, iH) - # print(concate) - # print(att_H) - att_W = concate[:, :, :, iH:iH + iW].reshape(B * iH, iW, iW) - out_H = torch.bmm(proj_value_H, att_H.permute(0, 2, 1)).reshape(B, iW, -1, iH).permute(0, 2, 3, 1) - out_W = torch.bmm(proj_value_W, att_W.permute(0, 2, 1)).reshape(B, iH, -1, iW).permute(0, 2, 1, 3) - # print(out_H.size(),out_W.size()) - return self.gamma * (out_H + out_W) + x - - -if __name__ == '__main__': - model = CrissCrossAttention(64) - # x [B,C,H,W] - x = torch.ones(10, 64, 32, 32) - out = model(x) - print(out.shape) diff --git a/spaces/ulysses115/vits-models/text/symbols.py b/spaces/ulysses115/vits-models/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/vits-models/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/modules/sub_quadratic_attention.py b/spaces/user238921933/stable-diffusion-webui/modules/sub_quadratic_attention.py deleted file mode 100644 index 055953236e5e1d1401feca93ca6a1cc342cb8595..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/sub_quadratic_attention.py +++ /dev/null @@ -1,214 +0,0 @@ -# original source: -# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py -# license: -# MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license) -# credit: -# Amin Rezaei (original author) -# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks) -# brkirch (modified to use torch.narrow instead of dynamic_slice implementation) -# implementation of: -# Self-attention Does Not Need O(n2) Memory": -# https://arxiv.org/abs/2112.05682v2 - -from functools import partial -import torch -from torch import Tensor -from torch.utils.checkpoint import checkpoint -import math -from typing import Optional, NamedTuple, List - - -def narrow_trunc( - input: Tensor, - dim: int, - start: int, - length: int -) -> Tensor: - return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start) - - -class AttnChunk(NamedTuple): - exp_values: Tensor - exp_weights_sum: Tensor - max_score: Tensor - - -class SummarizeChunk: - @staticmethod - def __call__( - query: Tensor, - key: Tensor, - value: Tensor, - ) -> 
AttnChunk: ... - - -class ComputeQueryChunkAttn: - @staticmethod - def __call__( - query: Tensor, - key: Tensor, - value: Tensor, - ) -> Tensor: ... - - -def _summarize_chunk( - query: Tensor, - key: Tensor, - value: Tensor, - scale: float, -) -> AttnChunk: - attn_weights = torch.baddbmm( - torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), - query, - key.transpose(1,2), - alpha=scale, - beta=0, - ) - max_score, _ = torch.max(attn_weights, -1, keepdim=True) - max_score = max_score.detach() - exp_weights = torch.exp(attn_weights - max_score) - exp_values = torch.bmm(exp_weights, value) if query.device.type == 'mps' else torch.bmm(exp_weights, value.to(exp_weights.dtype)).to(value.dtype) - max_score = max_score.squeeze(-1) - return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score) - - -def _query_chunk_attention( - query: Tensor, - key: Tensor, - value: Tensor, - summarize_chunk: SummarizeChunk, - kv_chunk_size: int, -) -> Tensor: - batch_x_heads, k_tokens, k_channels_per_head = key.shape - _, _, v_channels_per_head = value.shape - - def chunk_scanner(chunk_idx: int) -> AttnChunk: - key_chunk = narrow_trunc( - key, - 1, - chunk_idx, - kv_chunk_size - ) - value_chunk = narrow_trunc( - value, - 1, - chunk_idx, - kv_chunk_size - ) - return summarize_chunk(query, key_chunk, value_chunk) - - chunks: List[AttnChunk] = [ - chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size) - ] - acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks))) - chunk_values, chunk_weights, chunk_max = acc_chunk - - global_max, _ = torch.max(chunk_max, 0, keepdim=True) - max_diffs = torch.exp(chunk_max - global_max) - chunk_values *= torch.unsqueeze(max_diffs, -1) - chunk_weights *= max_diffs - - all_values = chunk_values.sum(dim=0) - all_weights = torch.unsqueeze(chunk_weights, -1).sum(dim=0) - return all_values / all_weights - - -# TODO: refactor CrossAttention#get_attention_scores to share code with this -def _get_attention_scores_no_kv_chunking( - query: Tensor, - key: Tensor, - value: Tensor, - scale: float, -) -> Tensor: - attn_scores = torch.baddbmm( - torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), - query, - key.transpose(1,2), - alpha=scale, - beta=0, - ) - attn_probs = attn_scores.softmax(dim=-1) - del attn_scores - hidden_states_slice = torch.bmm(attn_probs, value) if query.device.type == 'mps' else torch.bmm(attn_probs, value.to(attn_probs.dtype)).to(value.dtype) - return hidden_states_slice - - -class ScannedChunk(NamedTuple): - chunk_idx: int - attn_chunk: AttnChunk - - -def efficient_dot_product_attention( - query: Tensor, - key: Tensor, - value: Tensor, - query_chunk_size=1024, - kv_chunk_size: Optional[int] = None, - kv_chunk_size_min: Optional[int] = None, - use_checkpoint=True, -): - """Computes efficient dot-product attention given query, key, and value. - This is efficient version of attention presented in - https://arxiv.org/abs/2112.05682v2 which comes with O(sqrt(n)) memory requirements. - Args: - query: queries for calculating attention with shape of - `[batch * num_heads, tokens, channels_per_head]`. - key: keys for calculating attention with shape of - `[batch * num_heads, tokens, channels_per_head]`. - value: values to be used in attention with shape of - `[batch * num_heads, tokens, channels_per_head]`. - query_chunk_size: int: query chunks size - kv_chunk_size: Optional[int]: key/value chunks size. if None: defaults to sqrt(key_tokens) - kv_chunk_size_min: Optional[int]: key/value minimum chunk size. 
only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done). - use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference) - Returns: - Output of shape `[batch * num_heads, query_tokens, channels_per_head]`. - """ - batch_x_heads, q_tokens, q_channels_per_head = query.shape - _, k_tokens, _ = key.shape - scale = q_channels_per_head ** -0.5 - - kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens) - if kv_chunk_size_min is not None: - kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min) - - def get_query_chunk(chunk_idx: int) -> Tensor: - return narrow_trunc( - query, - 1, - chunk_idx, - min(query_chunk_size, q_tokens) - ) - - summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) - summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk - compute_query_chunk_attn: ComputeQueryChunkAttn = partial( - _get_attention_scores_no_kv_chunking, - scale=scale - ) if k_tokens <= kv_chunk_size else ( - # fast-path for when there's just 1 key-value chunk per query chunk (this is just sliced attention btw) - partial( - _query_chunk_attention, - kv_chunk_size=kv_chunk_size, - summarize_chunk=summarize_chunk, - ) - ) - - if q_tokens <= query_chunk_size: - # fast-path for when there's just 1 query chunk - return compute_query_chunk_attn( - query=query, - key=key, - value=value, - ) - - # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance, - # and pass slices to be mutated, instead of torch.cat()ing the returned slices - res = torch.cat([ - compute_query_chunk_attn( - query=get_query_chunk(i * query_chunk_size), - key=key, - value=value, - ) for i in range(math.ceil(q_tokens / query_chunk_size)) - ], dim=1) - return res diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/usage/engine.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/usage/engine.md deleted file mode 100644 index 8f6444390c5dbd25e576d3603d5544c1e8217a4f..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/usage/engine.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -comments: true -description: Learn how to train and customize your models fast with the Ultralytics YOLO 'DetectionTrainer' and 'CustomTrainer'. Read more here! -keywords: Ultralytics, YOLO, DetectionTrainer, BaseTrainer, engine components, trainers, customizing, callbacks, validators, predictors ---- - -Both the Ultralytics YOLO command-line and python interfaces are simply a high-level abstraction on the base engine -executors. Let's take a look at the Trainer engine. - -## BaseTrainer - -BaseTrainer contains the generic boilerplate training routine. It can be customized for any task based over overriding -the required functions or operations as long the as correct formats are followed. For example, you can support your own -custom model and dataloader by just overriding these functions: - -* `get_model(cfg, weights)` - The function that builds the model to be trained -* `get_dataloder()` - The function that builds the dataloader - More details and source code can be found in [`BaseTrainer` Reference](../reference/yolo/engine/trainer.md) - -## DetectionTrainer - -Here's how you can use the YOLOv8 `DetectionTrainer` and customize it. 
- -```python -from ultralytics.yolo.v8.detect import DetectionTrainer - -trainer = DetectionTrainer(overrides={...}) -trainer.train() -trained_model = trainer.best # get best model -``` - -### Customizing the DetectionTrainer - -Let's customize the trainer **to train a custom detection model** that is not supported directly. You can do this by -simply overloading the existing the `get_model` functionality: - -```python -from ultralytics.yolo.v8.detect import DetectionTrainer - - -class CustomTrainer(DetectionTrainer): - def get_model(self, cfg, weights): - ... - - -trainer = CustomTrainer(overrides={...}) -trainer.train() -``` - -You now realize that you need to customize the trainer further to: - -* Customize the `loss function`. -* Add `callback` that uploads model to your Google Drive after every 10 `epochs` - Here's how you can do it: - -```python -from ultralytics.yolo.v8.detect import DetectionTrainer -from ultralytics.nn.tasks import DetectionModel - - -class MyCustomModel(DetectionModel): - def init_criterion(self): - ... - - -class CustomTrainer(DetectionTrainer): - def get_model(self, cfg, weights): - return MyCustomModel(...) - - -# callback to upload model weights -def log_model(trainer): - last_weight_path = trainer.last - ... - - -trainer = CustomTrainer(overrides={...}) -trainer.add_callback("on_train_epoch_end", log_model) # Adds to existing callback -trainer.train() -``` - -To know more about Callback triggering events and entry point, checkout our [Callbacks Guide](callbacks.md) - -## Other engine components - -There are other components that can be customized similarly like `Validators` and `Predictors` -See Reference section for more information on these. \ No newline at end of file diff --git a/spaces/vcasadei/banana-defect-detection/README.md b/spaces/vcasadei/banana-defect-detection/README.md deleted file mode 100644 index 3d94de36cc14220e83f708ebff2827a78ee55d89..0000000000000000000000000000000000000000 --- a/spaces/vcasadei/banana-defect-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stepps -emoji: 🏃 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/victorbahlangene/NLP-News-Scraping-Summarization-Sentiment-App/app.py b/spaces/victorbahlangene/NLP-News-Scraping-Summarization-Sentiment-App/app.py deleted file mode 100644 index 756f62ce1b13fd092ed4c662b9de42d9a70c59a3..0000000000000000000000000000000000000000 --- a/spaces/victorbahlangene/NLP-News-Scraping-Summarization-Sentiment-App/app.py +++ /dev/null @@ -1,222 +0,0 @@ -import streamlit as st -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -from transformers import pipeline - -from bs4 import BeautifulSoup -import requests - -# app layout # -st.set_page_config( - page_title="Medium News App" -) - -## FUNCTIONS ## - -# search medium urls function # -@st.experimental_singleton -def search_meduim_urls(monitored_tickers): - search_url = "https://medium.com/tag/{}".format(monitored_tickers) - r = requests.get(search_url) - soup = BeautifulSoup(r.text, 'html.parser') - # location where link to news is found(a tag with attribute "aria-label"= "Post Preview Title") # - atags = soup.find_all('a', attrs={"aria-label": "Post Preview Title"}) - hrefs = ['https://medium.com'+link['href'] for link in atags] - return hrefs - -# funtion to search and scrape cleaned urls # -@st.experimental_singleton -def scrape_and_process(URLs): - 
""" - - function grabs all p-tags. - - create list of whats in every p tag. - - plit list into individual words, max 350. - - make 1 corpus of data. - - the length of each article tokens will be 350, - because the max of the model i am using is 512 and i want the app to be faster. - """ - ARTICLES = [] - for url in URLs: - r = requests.get(url) - soup = BeautifulSoup(r.text, 'html.parser') - paragraphs = soup.find_all('p') - text = [paragraph.text for paragraph in paragraphs] - words = ' '.join(text).split(' ')[:350] - ARTICLE = ' '.join(words) - ARTICLES.append(ARTICLE) - return ARTICLES - -#function to Summarise all Articles# -@st.experimental_singleton -def summarize(articles,_tokenizer,_model): - """ - encode , generate, decode, append to list - """ - summaries = [] - for article in articles: - input_ids = _tokenizer.encode(article, return_tensors='pt',max_length=512, truncation=True) - output = _model.generate(input_ids, max_length=56, num_beams=5, early_stopping=True) - summary = _tokenizer.decode(output[0], skip_special_tokens=True) - summaries.append(summary) - return summaries - -# function to load the transformer # -@st.experimental_singleton -def load_summary_transformer(): - # load transformers # - model_name = "facebook/bart-large-cnn" - tokenizer_summary = AutoTokenizer.from_pretrained(model_name) - model_summary = AutoModelForSeq2SeqLM.from_pretrained(model_name) - - return tokenizer_summary, model_summary - -# function to load sentiment pipeline # -@st.experimental_singleton -def load_sentiment_pipeline(): - sentiment = pipeline('sentiment-analysis') - - return sentiment - -# function to create final output # -def create_output_array(summaries, scores, urls): - output = [] - for ticker in monitored_tickers: - for counter in range(len(summaries[ticker])): - output_this = [ - ticker, - summaries[ticker][counter], - scores[ticker][counter]['label'], - scores[ticker][counter]['score'], - urls[ticker][counter] - ] - output.append(output_this) - return output - -# display summary output # -def cards(title,score,sentiment,article,link): - return f""" -
          -
          -
          {title}
          -
          The article is: {score*100:.2f}% {sentiment}.
          -

          {article}.

          - Link to article -
          -
          -

          - """ - -# function to load bootstrap # -@st.experimental_singleton -def boot(): - return """ - - - """ - -# load bootstrap # -st.markdown(boot(), unsafe_allow_html=True) - -# load_summary_transformer # -tokenizer_summary, model_summary = load_summary_transformer() - -# load sentiment pipeline # -sentiment = load_sentiment_pipeline() - - - -## APP OUTPUT ## -st.markdown("

          Medium News App

          ", - unsafe_allow_html=True) - -# containers # -col1, col2, col3 = st.columns(3) - -# session_state user input initilization # -if 'user_final_input' not in st.session_state: - st.session_state['user_final_input'] = '' - -# SEARCH SECTION # -with st.expander("Make inquiry"): - st.markdown("

          Summary

          ", - unsafe_allow_html=True) - # user input # - monitored_tickers = [] - - # user input options # - option = st.selectbox( - 'Some options to select', - ('chatgpt', 'fastai', 'pytorch', 'tensorflow',('manual entry')) - ) - # allows for manual search entry # - if option=="manual entry": - user_select = st.text_input( - "Please enter a Data Science topic of interest: ") - monitored_tickers.append(user_select) - st.write(user_select) - st.session_state['user_final_input'] = user_select - else: - monitored_tickers.append(option) - st.write(option) - st.session_state['user_final_input'] = option - - - - # how many summaries to inference # - summary_count = st.slider('How many summaries do you want?', 1, 5, 1) - st.write("I'm selecting ", summary_count, 'summaries.') - if summary_count == 3: - st.markdown(f""" - - """ - , unsafe_allow_html=True) - elif summary_count == 4 or summary_count == 5: - st.markdown(f""" - - """ - , unsafe_allow_html=True) - - - - with st.form(key="user_input"): - summary = st.form_submit_button("Summary") - if summary: - # test function # - search_meduim_urls(monitored_tickers[0]) - # make a dictionary {framework: link_to_article about the framework} # - cleaned_urls= {framework:search_meduim_urls(framework) for framework in monitored_tickers} - - articles = {ticker:scrape_and_process(cleaned_urls[ticker]) for ticker in monitored_tickers} - - articles[st.session_state['user_final_input']] = articles[st.session_state['user_final_input']][:summary_count] - #articles[option] = articles[option][:summary_count] - - #articles - - # summary # - # 1m 25s to sumarize # - summaries = {ticker:summarize(articles[ticker],tokenizer_summary, model_summary) for ticker in monitored_tickers} - - - scores = {ticker:sentiment(summaries[ticker]) for ticker in monitored_tickers} - #scores - - final_output = create_output_array(summaries, scores, cleaned_urls) - #final_output - - #final_output[0] - for i in range(len(final_output)): - st.markdown( - cards( - final_output[i][0], - final_output[i][3], - final_output[i][2], - final_output[i][1], - final_output[i][4] - ), - unsafe_allow_html=True) - - diff --git a/spaces/vinay123/panoptic-segment-anything/README.md b/spaces/vinay123/panoptic-segment-anything/README.md deleted file mode 100644 index a332324e6ffd5313db46a6d14cc0223dfac47b8b..0000000000000000000000000000000000000000 --- a/spaces/vinay123/panoptic-segment-anything/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Panoptic Segment Anything -emoji: 🖼️🪄 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 -models: -- ShilongLiu/GroundingDINO -- CIDAS/clipseg-rd64-refined -tags: -- segmentation -- zero-shot -- sam -duplicated_from: segments/panoptic-segment-anything ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vincentmin/TalkToMe/README.md b/spaces/vincentmin/TalkToMe/README.md deleted file mode 100644 index 08a9503dc7d338da24e175a2ef7982df62ac5d69..0000000000000000000000000000000000000000 --- a/spaces/vincentmin/TalkToMe/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TalkToMe -emoji: 💬 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: true -license: apache-2.0 -duplicated_from: HuggingFaceH4/falcon-chat ---- diff --git a/spaces/vpsrikanth/FaceSimilarity/app/Hackathon_setup/__init__.py b/spaces/vpsrikanth/FaceSimilarity/app/Hackathon_setup/__init__.py deleted file mode 100644 
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/vumichien/Generate_human_motion/VQ-Trans/README.md b/spaces/vumichien/Generate_human_motion/VQ-Trans/README.md deleted file mode 100644 index 547a1d4b52a5c76f0f86c641557f99d0688c0ffd..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/VQ-Trans/README.md +++ /dev/null @@ -1,400 +0,0 @@ -# Motion VQ-Trans -Pytorch implementation of paper "Generating Human Motion from Textual Descriptions with High Quality Discrete Representation" - - -[[Notebook Demo]](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing) - - -![teaser](img/Teaser.png) - -If our project is helpful for your research, please consider citing : (todo) -``` -@inproceedings{shen2020ransac, - title={RANSAC-Flow: generic two-stage image alignment}, - author={Shen, Xi and Darmon, Fran{\c{c}}ois and Efros, Alexei A and Aubry, Mathieu}, - booktitle={16th European Conference on Computer Vision} - year={2020} - } -``` - - -## Table of Content -* [1. Visual Results](#1-visual-results) -* [2. Installation](#2-installation) -* [3. Quick Start](#3-quick-start) -* [4. Train](#4-train) -* [5. Evaluation](#5-evaluation) -* [6. Motion Render](#6-motion-render) -* [7. Acknowledgement](#7-acknowledgement) -* [8. ChangLog](#8-changlog) - - - - -## 1. Visual Results (More results can be found in our project page (todo)) - -![visualization](img/ALLvis.png) - - -## 2. Installation - -### 2.1. Environment - - - -Our model can be learnt in a **single GPU V100-32G** - -```bash -conda env create -f environment.yml -conda activate VQTrans -``` - -The code was tested on Python 3.8 and PyTorch 1.8.1. - - -### 2.2. Dependencies - -```bash -bash dataset/prepare/download_glove.sh -``` - - -### 2.3. Datasets - - -We are using two 3D human motion-language dataset: HumanML3D and KIT-ML. For both datasets, you could find the details as well as download link [[here]](https://github.com/EricGuo5513/HumanML3D). - -Take HumanML3D for an example, the file directory should look like this: -``` -./dataset/HumanML3D/ -├── new_joint_vecs/ -├── texts/ -├── Mean.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D) -├── Std.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D) -├── train.txt -├── val.txt -├── test.txt -├── train_val.txt -└──all.txt -``` - - -### 2.4. Motion & text feature extractors: - -We use the same extractors provided by [t2m](https://github.com/EricGuo5513/text-to-motion) to evaluate our generated motions. Please download the extractors. - -```bash -bash dataset/prepare/download_extractor.sh -``` - -### 2.5. Pre-trained models - -The pretrained model files will be stored in the 'pretrained' folder: -```bash -bash dataset/prepare/download_model.sh -``` - - - -### 2.6. Render motion (optional) - -If you want to render the generated motion, you need to install: - -```bash -sudo sh dataset/prepare/download_smpl.sh -conda install -c menpo osmesa -conda install h5py -conda install -c conda-forge shapely pyrender trimesh mapbox_earcut -``` - - - -## 3. Quick Start - -A quick start guide of how to use our code is available in [demo.ipynb](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing) - -

-*demo preview animation*
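Before moving on to training, it can help to confirm that the dataset layout described in section 2.3 is actually in place. A minimal sketch of such a check, assuming the `./dataset/HumanML3D/` path and file names listed above (KIT-ML follows the same pattern):

```python
import os

# Expected HumanML3D entries, taken from the directory tree in section 2.3.
root = "./dataset/HumanML3D"
expected = [
    "new_joint_vecs", "texts", "Mean.npy", "Std.npy",
    "train.txt", "val.txt", "test.txt", "train_val.txt", "all.txt",
]

missing = [name for name in expected if not os.path.exists(os.path.join(root, name))]
if missing:
    print("Missing dataset entries:", missing)
else:
    print("HumanML3D layout looks complete.")
```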

          - - -## 4. Train - -Note that, for kit dataset, just need to set '--dataname kit'. - -### 4.1. VQ-VAE - -The results are saved in the folder output_vqfinal. - -
          - -VQ training - - -```bash -python3 train_vq.py \ ---batch-size 256 \ ---lr 2e-4 \ ---total-iter 300000 \ ---lr-scheduler 200000 \ ---nb-code 512 \ ---down-t 2 \ ---depth 3 \ ---dilation-growth-rate 3 \ ---out-dir output \ ---dataname t2m \ ---vq-act relu \ ---quantizer ema_reset \ ---loss-vel 0.5 \ ---recons-loss l1_smooth \ ---exp-name VQVAE -``` - -
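The note above says KIT-ML only needs `--dataname kit`. A small driver sketch that launches the same VQ-VAE training for both datasets one after another, assuming `train_vq.py` accepts exactly the flags shown above (the per-dataset experiment names are illustrative):

```python
import subprocess

# Shared hyperparameters, copied from the command above; only --dataname and --exp-name vary.
common = [
    "python3", "train_vq.py",
    "--batch-size", "256", "--lr", "2e-4",
    "--total-iter", "300000", "--lr-scheduler", "200000",
    "--nb-code", "512", "--down-t", "2", "--depth", "3",
    "--dilation-growth-rate", "3", "--out-dir", "output",
    "--vq-act", "relu", "--quantizer", "ema_reset",
    "--loss-vel", "0.5", "--recons-loss", "l1_smooth",
]

for dataname in ["t2m", "kit"]:  # HumanML3D, then KIT-ML
    cmd = common + ["--dataname", dataname, "--exp-name", f"VQVAE_{dataname}"]
    subprocess.run(cmd, check=True)
```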
          - -### 4.2. Motion-Transformer - -The results are saved in the folder output_transformer. - -
          - -MoTrans training - - -```bash -python3 train_t2m_trans.py \ ---exp-name VQTransformer \ ---batch-size 128 \ ---num-layers 9 \ ---embed-dim-gpt 1024 \ ---nb-code 512 \ ---n-head-gpt 16 \ ---block-size 51 \ ---ff-rate 4 \ ---drop-out-rate 0.1 \ ---resume-pth output/VQVAE/net_last.pth \ ---vq-name VQVAE \ ---out-dir output \ ---total-iter 300000 \ ---lr-scheduler 150000 \ ---lr 0.0001 \ ---dataname t2m \ ---down-t 2 \ ---depth 3 \ ---quantizer ema_reset \ ---eval-iter 10000 \ ---pkeep 0.5 \ ---dilation-growth-rate 3 \ ---vq-act relu -``` - -
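The transformer stage consumes the VQ-VAE trained in section 4.1 through `--resume-pth` and `--vq-name`, so the stage-1 checkpoint has to exist before this command is launched. A tiny guard sketch, assuming the default `output/VQVAE/net_last.pth` path used in the commands above:

```python
import os
import sys

# Stage-1 checkpoint expected by --resume-pth in the command above.
vq_ckpt = "output/VQVAE/net_last.pth"

if not os.path.isfile(vq_ckpt):
    sys.exit(f"VQ-VAE checkpoint not found at {vq_ckpt}; run the section 4.1 training first.")
print(f"Found {vq_ckpt} - safe to start the transformer stage.")
```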
          - -## 5. Evaluation - -### 5.1. VQ-VAE -
          - -VQ eval - - -```bash -python3 VQ_eval.py \ ---batch-size 256 \ ---lr 2e-4 \ ---total-iter 300000 \ ---lr-scheduler 200000 \ ---nb-code 512 \ ---down-t 2 \ ---depth 3 \ ---dilation-growth-rate 3 \ ---out-dir output \ ---dataname t2m \ ---vq-act relu \ ---quantizer ema_reset \ ---loss-vel 0.5 \ ---recons-loss l1_smooth \ ---exp-name TEST_VQVAE \ ---resume-pth output/VQVAE/net_last.pth -``` - -
          - -### 5.2. Motion-Transformer - -
          - -MoTrans eval - - -```bash -python3 GPT_eval_multi.py \ ---exp-name TEST_VQTransformer \ ---batch-size 128 \ ---num-layers 9 \ ---embed-dim-gpt 1024 \ ---nb-code 512 \ ---n-head-gpt 16 \ ---block-size 51 \ ---ff-rate 4 \ ---drop-out-rate 0.1 \ ---resume-pth output/VQVAE/net_last.pth \ ---vq-name VQVAE \ ---out-dir output \ ---total-iter 300000 \ ---lr-scheduler 150000 \ ---lr 0.0001 \ ---dataname t2m \ ---down-t 2 \ ---depth 3 \ ---quantizer ema_reset \ ---eval-iter 10000 \ ---pkeep 0.5 \ ---dilation-growth-rate 3 \ ---vq-act relu \ ---resume-gpt output/VQTransformer/net_best_fid.pth -``` - -
          - - -## 6. Motion Render - -
          - -Motion Render - - -You should input the npy folder address and the motion names. Here is an example: - -```bash -python3 render_final.py --filedir output/TEST_VQTransformer/ --motion-list 000019 005485 -``` - -
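`render_final.py` takes the npy folder and a list of motion names, so it is useful to see which motions an experiment actually produced before choosing `--motion-list` values. A small sketch, assuming the generated motions are stored as `<name>.npy` files somewhere under the experiment folder, as the example above suggests:

```python
import glob
import os

# Folder passed to --filedir in the example above.
filedir = "output/TEST_VQTransformer/"

# Collect candidate motion names from any .npy files under the experiment folder.
names = sorted(
    os.path.splitext(os.path.basename(p))[0]
    for p in glob.glob(os.path.join(filedir, "**", "*.npy"), recursive=True)
)
print(f"{len(names)} motions found, e.g.: {names[:5]}")
# Render a couple of them, e.g.:
#   python3 render_final.py --filedir output/TEST_VQTransformer/ --motion-list 000019 005485
```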
          - -### 7. Acknowledgement - -We appreciate helps from : - -* Public code like [text-to-motion](https://github.com/EricGuo5513/text-to-motion), [TM2T](https://github.com/EricGuo5513/TM2T) etc. - -### 8. ChangLog - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/registry.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/registry.py deleted file mode 100644 index fa9df39bc9f3d8d568361e7250ab35468f2b74e0..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/utils/registry.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import inspect -import warnings -from functools import partial - -from .misc import is_seq_of - - -def build_from_cfg(cfg, registry, default_args=None): - """Build a module from config dict. - - Args: - cfg (dict): Config dict. It should at least contain the key "type". - registry (:obj:`Registry`): The registry to search the type from. - default_args (dict, optional): Default initialization arguments. - - Returns: - object: The constructed object. - """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - if default_args is None or 'type' not in default_args: - raise KeyError( - '`cfg` or `default_args` must contain the key "type", ' - f'but got {cfg}\n{default_args}') - if not isinstance(registry, Registry): - raise TypeError('registry must be an mmcv.Registry object, ' - f'but got {type(registry)}') - if not (isinstance(default_args, dict) or default_args is None): - raise TypeError('default_args must be a dict or None, ' - f'but got {type(default_args)}') - - args = cfg.copy() - - if default_args is not None: - for name, value in default_args.items(): - args.setdefault(name, value) - - obj_type = args.pop('type') - if isinstance(obj_type, str): - obj_cls = registry.get(obj_type) - if obj_cls is None: - raise KeyError( - f'{obj_type} is not in the {registry.name} registry') - elif inspect.isclass(obj_type): - obj_cls = obj_type - else: - raise TypeError( - f'type must be a str or valid type, but got {type(obj_type)}') - try: - return obj_cls(**args) - except Exception as e: - # Normal TypeError does not print class name. - raise type(e)(f'{obj_cls.__name__}: {e}') - - -class Registry: - """A registry to map strings to classes. - - Registered object could be built from registry. - Example: - >>> MODELS = Registry('models') - >>> @MODELS.register_module() - >>> class ResNet: - >>> pass - >>> resnet = MODELS.build(dict(type='ResNet')) - - Please refer to - https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for - advanced usage. - - Args: - name (str): Registry name. - build_func(func, optional): Build function to construct instance from - Registry, func:`build_from_cfg` is used if neither ``parent`` or - ``build_func`` is specified. If ``parent`` is specified and - ``build_func`` is not given, ``build_func`` will be inherited - from ``parent``. Default: None. - parent (Registry, optional): Parent registry. The class registered in - children registry could be built from parent. Default: None. - scope (str, optional): The scope of registry. It is the key to search - for children registry. If not specified, scope will be the name of - the package where class is defined, e.g. mmdet, mmcls, mmseg. - Default: None. 
- """ - - def __init__(self, name, build_func=None, parent=None, scope=None): - self._name = name - self._module_dict = dict() - self._children = dict() - self._scope = self.infer_scope() if scope is None else scope - - # self.build_func will be set with the following priority: - # 1. build_func - # 2. parent.build_func - # 3. build_from_cfg - if build_func is None: - if parent is not None: - self.build_func = parent.build_func - else: - self.build_func = build_from_cfg - else: - self.build_func = build_func - if parent is not None: - assert isinstance(parent, Registry) - parent._add_children(self) - self.parent = parent - else: - self.parent = None - - def __len__(self): - return len(self._module_dict) - - def __contains__(self, key): - return self.get(key) is not None - - def __repr__(self): - format_str = self.__class__.__name__ + \ - f'(name={self._name}, ' \ - f'items={self._module_dict})' - return format_str - - @staticmethod - def infer_scope(): - """Infer the scope of registry. - - The name of the package where registry is defined will be returned. - - Example: - # in mmdet/models/backbone/resnet.py - >>> MODELS = Registry('models') - >>> @MODELS.register_module() - >>> class ResNet: - >>> pass - The scope of ``ResNet`` will be ``mmdet``. - - - Returns: - scope (str): The inferred scope name. - """ - # inspect.stack() trace where this function is called, the index-2 - # indicates the frame where `infer_scope()` is called - filename = inspect.getmodule(inspect.stack()[2][0]).__name__ - split_filename = filename.split('.') - return split_filename[0] - - @staticmethod - def split_scope_key(key): - """Split scope and key. - - The first scope will be split from key. - - Examples: - >>> Registry.split_scope_key('mmdet.ResNet') - 'mmdet', 'ResNet' - >>> Registry.split_scope_key('ResNet') - None, 'ResNet' - - Return: - scope (str, None): The first scope. - key (str): The remaining key. - """ - split_index = key.find('.') - if split_index != -1: - return key[:split_index], key[split_index + 1:] - else: - return None, key - - @property - def name(self): - return self._name - - @property - def scope(self): - return self._scope - - @property - def module_dict(self): - return self._module_dict - - @property - def children(self): - return self._children - - def get(self, key): - """Get the registry record. - - Args: - key (str): The class name in string format. - - Returns: - class: The corresponding class. - """ - scope, real_key = self.split_scope_key(key) - if scope is None or scope == self._scope: - # get from self - if real_key in self._module_dict: - return self._module_dict[real_key] - else: - # get from self._children - if scope in self._children: - return self._children[scope].get(real_key) - else: - # goto root - parent = self.parent - while parent.parent is not None: - parent = parent.parent - return parent.get(key) - - def build(self, *args, **kwargs): - return self.build_func(*args, **kwargs, registry=self) - - def _add_children(self, registry): - """Add children for a registry. - - The ``registry`` will be added as children based on its scope. - The parent registry could build objects from children registry. 
- - Example: - >>> models = Registry('models') - >>> mmdet_models = Registry('models', parent=models) - >>> @mmdet_models.register_module() - >>> class ResNet: - >>> pass - >>> resnet = models.build(dict(type='mmdet.ResNet')) - """ - - assert isinstance(registry, Registry) - assert registry.scope is not None - assert registry.scope not in self.children, \ - f'scope {registry.scope} exists in {self.name} registry' - self.children[registry.scope] = registry - - def _register_module(self, module_class, module_name=None, force=False): - if not inspect.isclass(module_class): - raise TypeError('module must be a class, ' - f'but got {type(module_class)}') - - if module_name is None: - module_name = module_class.__name__ - if isinstance(module_name, str): - module_name = [module_name] - for name in module_name: - if not force and name in self._module_dict: - raise KeyError(f'{name} is already registered ' - f'in {self.name}') - self._module_dict[name] = module_class - - def deprecated_register_module(self, cls=None, force=False): - warnings.warn( - 'The old API of register_module(module, force=False) ' - 'is deprecated and will be removed, please use the new API ' - 'register_module(name=None, force=False, module=None) instead.') - if cls is None: - return partial(self.deprecated_register_module, force=force) - self._register_module(cls, force=force) - return cls - - def register_module(self, name=None, force=False, module=None): - """Register a module. - - A record will be added to `self._module_dict`, whose key is the class - name or the specified name, and value is the class itself. - It can be used as a decorator or a normal function. - - Example: - >>> backbones = Registry('backbone') - >>> @backbones.register_module() - >>> class ResNet: - >>> pass - - >>> backbones = Registry('backbone') - >>> @backbones.register_module(name='mnet') - >>> class MobileNet: - >>> pass - - >>> backbones = Registry('backbone') - >>> class ResNet: - >>> pass - >>> backbones.register_module(ResNet) - - Args: - name (str | None): The module name to be registered. If not - specified, the class name will be used. - force (bool, optional): Whether to override an existing class with - the same name. Default: False. - module (type): Module class to be registered. - """ - if not isinstance(force, bool): - raise TypeError(f'force must be a boolean, but got {type(force)}') - # NOTE: This is a walkaround to be compatible with the old api, - # while it may introduce unexpected bugs. 
- if isinstance(name, type): - return self.deprecated_register_module(name, force=force) - - # raise the error ahead of time - if not (name is None or isinstance(name, str) or is_seq_of(name, str)): - raise TypeError( - 'name must be either of None, an instance of str or a sequence' - f' of str, but got {type(name)}') - - # use it as a normal method: x.register_module(module=SomeClass) - if module is not None: - self._register_module( - module_class=module, module_name=name, force=force) - return module - - # use it as a decorator: @x.register_module() - def _register(cls): - self._register_module( - module_class=cls, module_name=name, force=force) - return cls - - return _register diff --git a/spaces/weide/ChuanhuChatGPT2/assets/custom.js b/spaces/weide/ChuanhuChatGPT2/assets/custom.js deleted file mode 100644 index 476d1144c60bf4a2074caa97369bba91a4ece081..0000000000000000000000000000000000000000 --- a/spaces/weide/ChuanhuChatGPT2/assets/custom.js +++ /dev/null @@ -1,70 +0,0 @@ -// custom javascript here -const MAX_HISTORY_LENGTH = 32; - -var key_down_history = []; -var currentIndex = -1; -var user_input_ta; - -var ga = document.getElementsByTagName("gradio-app"); -var targetNode = ga[0]; -var observer = new MutationObserver(function(mutations) { - for (var i = 0; i < mutations.length; i++) { - if (mutations[i].addedNodes.length) { - var user_input_tb = document.getElementById('user_input_tb'); - if (user_input_tb) { - // 监听到user_input_tb被添加到DOM树中 - // 这里可以编写元素加载完成后需要执行的代码 - user_input_ta = user_input_tb.querySelector("textarea"); - if (user_input_ta){ - observer.disconnect(); // 停止监听 - // 在 textarea 上监听 keydown 事件 - user_input_ta.addEventListener("keydown", function (event) { - var value = user_input_ta.value.trim(); - // 判断按下的是否为方向键 - if (event.code === 'ArrowUp' || event.code === 'ArrowDown') { - // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作 - if(value && key_down_history.indexOf(value) === -1) - return; - // 对于需要响应的动作,阻止默认行为。 - event.preventDefault(); - var length = key_down_history.length; - if(length === 0) { - currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置 - return; - } - if (currentIndex === -1) { - currentIndex = length; - } - if (event.code === 'ArrowUp' && currentIndex > 0) { - currentIndex--; - user_input_ta.value = key_down_history[currentIndex]; - } else if (event.code === 'ArrowDown' && currentIndex < length - 1) { - currentIndex++; - user_input_ta.value = key_down_history[currentIndex]; - } - user_input_ta.selectionStart = user_input_ta.value.length; - user_input_ta.selectionEnd = user_input_ta.value.length; - const input_event = new InputEvent("input", {bubbles: true, cancelable: true}); - user_input_ta.dispatchEvent(input_event); - }else if(event.code === "Enter") { - if (value) { - currentIndex = -1; - if(key_down_history.indexOf(value) === -1){ - key_down_history.push(value); - if (key_down_history.length > MAX_HISTORY_LENGTH) { - key_down_history.shift(); - } - } - } - } - }); - break; - } - } - } - } -}); - -// 监听目标节点的子节点列表是否发生变化 -observer.observe(targetNode, { childList: true , subtree: true }); - diff --git a/spaces/wendys-llc/panoptic-segment-anything/segment_anything/segment_anything/utils/amg.py b/spaces/wendys-llc/panoptic-segment-anything/segment_anything/segment_anything/utils/amg.py deleted file mode 100644 index 3a137778e45c464c079658ecb87ec53270e789f7..0000000000000000000000000000000000000000 --- a/spaces/wendys-llc/panoptic-segment-anything/segment_anything/segment_anything/utils/amg.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. 
and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -import math -from copy import deepcopy -from itertools import product -from typing import Any, Dict, Generator, ItemsView, List, Tuple - - -class MaskData: - """ - A structure for storing masks and their related data in batched format. - Implements basic filtering and concatenation. - """ - - def __init__(self, **kwargs) -> None: - for v in kwargs.values(): - assert isinstance( - v, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." - self._stats = dict(**kwargs) - - def __setitem__(self, key: str, item: Any) -> None: - assert isinstance( - item, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." - self._stats[key] = item - - def __delitem__(self, key: str) -> None: - del self._stats[key] - - def __getitem__(self, key: str) -> Any: - return self._stats[key] - - def items(self) -> ItemsView[str, Any]: - return self._stats.items() - - def filter(self, keep: torch.Tensor) -> None: - for k, v in self._stats.items(): - if v is None: - self._stats[k] = None - elif isinstance(v, torch.Tensor): - self._stats[k] = v[torch.as_tensor(keep, device=v.device)] - elif isinstance(v, np.ndarray): - self._stats[k] = v[keep.detach().cpu().numpy()] - elif isinstance(v, list) and keep.dtype == torch.bool: - self._stats[k] = [a for i, a in enumerate(v) if keep[i]] - elif isinstance(v, list): - self._stats[k] = [v[i] for i in keep] - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def cat(self, new_stats: "MaskData") -> None: - for k, v in new_stats.items(): - if k not in self._stats or self._stats[k] is None: - self._stats[k] = deepcopy(v) - elif isinstance(v, torch.Tensor): - self._stats[k] = torch.cat([self._stats[k], v], dim=0) - elif isinstance(v, np.ndarray): - self._stats[k] = np.concatenate([self._stats[k], v], axis=0) - elif isinstance(v, list): - self._stats[k] = self._stats[k] + deepcopy(v) - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def to_numpy(self) -> None: - for k, v in self._stats.items(): - if isinstance(v, torch.Tensor): - self._stats[k] = v.detach().cpu().numpy() - - -def is_box_near_crop_edge( - boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 -) -> torch.Tensor: - """Filter masks at the edge of a crop, but not at the edge of the original image.""" - crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) - orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) - boxes = uncrop_boxes_xyxy(boxes, crop_box).float() - near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) - near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) - near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) - return torch.any(near_crop_edge, dim=1) - - -def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: - box_xywh = deepcopy(box_xyxy) - box_xywh[2] = box_xywh[2] - box_xywh[0] - box_xywh[3] = box_xywh[3] - box_xywh[1] - return box_xywh - - -def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: - assert len(args) > 0 and all( - len(a) == len(args[0]) for a in args - ), "Batched iteration must have inputs of all the same size." 
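The deleted `MaskData` container keeps per-mask statistics batched along dim 0: `filter` indexes every field with the same boolean keep mask, and `cat` concatenates the fields of another instance. A torch-only sketch of those two operations on tensor-valued fields (the field names are illustrative; the real class also accepts numpy arrays and plain lists):

```python
import torch

stats = {"iou_preds": torch.tensor([0.91, 0.42, 0.77]),
         "points": torch.tensor([[0.1, 0.2], [0.5, 0.5], [0.9, 0.1]])}

keep = stats["iou_preds"] > 0.5                      # boolean keep mask, as passed to filter()
filtered = {k: v[keep] for k, v in stats.items()}    # every field is indexed the same way

extra = {"iou_preds": torch.tensor([0.66]),
         "points": torch.tensor([[0.3, 0.7]])}
merged = {k: torch.cat([filtered[k], extra[k]], dim=0)   # cat() stacks fields along dim 0
          for k in filtered}

print(merged["iou_preds"])  # tensor([0.9100, 0.7700, 0.6600])
```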
- n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) - for b in range(n_batches): - yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] - - -def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: - """ - Encodes masks to an uncompressed RLE, in the format expected by - pycoco tools. - """ - # Put in fortran order and flatten h,w - b, h, w = tensor.shape - tensor = tensor.permute(0, 2, 1).flatten(1) - - # Compute change indices - diff = tensor[:, 1:] ^ tensor[:, :-1] - change_indices = diff.nonzero() - - # Encode run length - out = [] - for i in range(b): - cur_idxs = change_indices[change_indices[:, 0] == i, 1] - cur_idxs = torch.cat( - [ - torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), - cur_idxs + 1, - torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), - ] - ) - btw_idxs = cur_idxs[1:] - cur_idxs[:-1] - counts = [] if tensor[i, 0] == 0 else [0] - counts.extend(btw_idxs.detach().cpu().tolist()) - out.append({"size": [h, w], "counts": counts}) - return out - - -def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: - """Compute a binary mask from an uncompressed RLE.""" - h, w = rle["size"] - mask = np.empty(h * w, dtype=bool) - idx = 0 - parity = False - for count in rle["counts"]: - mask[idx : idx + count] = parity - idx += count - parity ^= True - mask = mask.reshape(w, h) - return mask.transpose() # Put in C order - - -def area_from_rle(rle: Dict[str, Any]) -> int: - return sum(rle["counts"][1::2]) - - -def calculate_stability_score( - masks: torch.Tensor, mask_threshold: float, threshold_offset: float -) -> torch.Tensor: - """ - Computes the stability score for a batch of masks. The stability - score is the IoU between the binary masks obtained by thresholding - the predicted mask logits at high and low values. - """ - # One mask is always contained inside the other. - # Save memory by preventing unnecesary cast to torch.int64 - intersections = ( - (masks > (mask_threshold + threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - unions = ( - (masks > (mask_threshold - threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - return intersections / unions - - -def build_point_grid(n_per_side: int) -> np.ndarray: - """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" - offset = 1 / (2 * n_per_side) - points_one_side = np.linspace(offset, 1 - offset, n_per_side) - points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) - points_y = np.tile(points_one_side[:, None], (1, n_per_side)) - points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) - return points - - -def build_all_layer_point_grids( - n_per_side: int, n_layers: int, scale_per_layer: int -) -> List[np.ndarray]: - """Generates point grids for all crop layers.""" - points_by_layer = [] - for i in range(n_layers + 1): - n_points = int(n_per_side / (scale_per_layer**i)) - points_by_layer.append(build_point_grid(n_points)) - return points_by_layer - - -def generate_crop_boxes( - im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float -) -> Tuple[List[List[int]], List[int]]: - """ - Generates a list of crop boxes of different sizes. Each layer - has (2**i)**2 boxes for the ith layer. 
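The uncompressed RLE used above stores run lengths over the column-major (Fortran-order) flattening of each mask, beginning with a background run that may have length 0, which is why `area_from_rle` can simply sum the odd-indexed counts. A tiny worked example follows; the local `decode` helper mirrors the logic of the deleted `rle_to_mask` rather than importing it:

```python
import numpy as np

rle = {"size": [2, 2], "counts": [2, 1, 1]}  # one foreground pixel in a 2x2 mask

def decode(rle):
    h, w = rle["size"]
    flat = np.zeros(h * w, dtype=bool)
    idx, parity = 0, False
    for count in rle["counts"]:      # alternating runs of background / foreground
        flat[idx: idx + count] = parity
        idx += count
        parity = not parity
    return flat.reshape(w, h).T      # undo the Fortran-order flattening

print(decode(rle).astype(int))       # [[0 1]
                                     #  [0 0]]
print(sum(rle["counts"][1::2]))      # area_from_rle -> 1
```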
- """ - crop_boxes, layer_idxs = [], [] - im_h, im_w = im_size - short_side = min(im_h, im_w) - - # Original image - crop_boxes.append([0, 0, im_w, im_h]) - layer_idxs.append(0) - - def crop_len(orig_len, n_crops, overlap): - return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) - - for i_layer in range(n_layers): - n_crops_per_side = 2 ** (i_layer + 1) - overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) - - crop_w = crop_len(im_w, n_crops_per_side, overlap) - crop_h = crop_len(im_h, n_crops_per_side, overlap) - - crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] - crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] - - # Crops in XYWH format - for x0, y0 in product(crop_box_x0, crop_box_y0): - box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] - crop_boxes.append(box) - layer_idxs.append(i_layer + 1) - - return crop_boxes, layer_idxs - - -def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) - # Check if boxes has a channel dimension - if len(boxes.shape) == 3: - offset = offset.unsqueeze(1) - return boxes + offset - - -def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0]], device=points.device) - # Check if points has a channel dimension - if len(points.shape) == 3: - offset = offset.unsqueeze(1) - return points + offset - - -def uncrop_masks( - masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int -) -> torch.Tensor: - x0, y0, x1, y1 = crop_box - if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: - return masks - # Coordinate transform masks - pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) - pad = (x0, pad_x - x0, y0, pad_y - y0) - return torch.nn.functional.pad(masks, pad, value=0) - - -def remove_small_regions( - mask: np.ndarray, area_thresh: float, mode: str -) -> Tuple[np.ndarray, bool]: - """ - Removes small disconnected regions and holes in a mask. Returns the - mask and an indicator of if the mask has been modified. - """ - import cv2 # type: ignore - - assert mode in ["holes", "islands"] - correct_holes = mode == "holes" - working_mask = (correct_holes ^ mask).astype(np.uint8) - n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) - sizes = stats[:, -1][1:] # Row 0 is background label - small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] - if len(small_regions) == 0: - return mask, False - fill_labels = [0] + small_regions - if not correct_holes: - fill_labels = [i for i in range(n_labels) if i not in fill_labels] - # If every region is below threshold, keep largest - if len(fill_labels) == 0: - fill_labels = [int(np.argmax(sizes)) + 1] - mask = np.isin(regions, fill_labels) - return mask, True - - -def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: - from pycocotools import mask as mask_utils # type: ignore - - h, w = uncompressed_rle["size"] - rle = mask_utils.frPyObjects(uncompressed_rle, h, w) - rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json - return rle - - -def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: - """ - Calculates boxes in XYXY format around masks. Return [0,0,0,0] for - an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
- """ - # torch.max below raises an error on empty inputs, just skip in this case - if torch.numel(masks) == 0: - return torch.zeros(*masks.shape[:-2], 4, device=masks.device) - - # Normalize shape to CxHxW - shape = masks.shape - h, w = shape[-2:] - if len(shape) > 2: - masks = masks.flatten(0, -3) - else: - masks = masks.unsqueeze(0) - - # Get top and bottom edges - in_height, _ = torch.max(masks, dim=-1) - in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] - bottom_edges, _ = torch.max(in_height_coords, dim=-1) - in_height_coords = in_height_coords + h * (~in_height) - top_edges, _ = torch.min(in_height_coords, dim=-1) - - # Get left and right edges - in_width, _ = torch.max(masks, dim=-2) - in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] - right_edges, _ = torch.max(in_width_coords, dim=-1) - in_width_coords = in_width_coords + w * (~in_width) - left_edges, _ = torch.min(in_width_coords, dim=-1) - - # If the mask is empty the right edge will be to the left of the left edge. - # Replace these boxes with [0, 0, 0, 0] - empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) - out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) - out = out * (~empty_filter).unsqueeze(-1) - - # Return to original shape - if len(shape) > 2: - out = out.reshape(*shape[:-2], 4) - else: - out = out[0] - - return out diff --git a/spaces/wffcyrus/llama2-with-gradio-chat/llama2.py b/spaces/wffcyrus/llama2-with-gradio-chat/llama2.py deleted file mode 100644 index 73f085177389f3e6724d076eb3da2ca3db05178f..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/llama2-with-gradio-chat/llama2.py +++ /dev/null @@ -1,133 +0,0 @@ -import os -import json -import requests -import sseclient - -from pingpong import PingPong -from pingpong.pingpong import PPManager -from pingpong.pingpong import PromptFmt -from pingpong.pingpong import UIFmt -from pingpong.gradio import GradioChatUIFmt - -class LLaMA2ChatPromptFmt(PromptFmt): - @classmethod - def ctx(cls, context): - if context is None or context == "": - return "" - else: - return f"""<> -{context} -<> -""" - - @classmethod - def prompt(cls, pingpong, truncate_size): - ping = pingpong.ping[:truncate_size] - pong = "" if pingpong.pong is None else pingpong.pong[:truncate_size] - return f"""[INST] {ping} [/INST] {pong}""" - -class LLaMA2ChatPPManager(PPManager): - def build_prompts(self, from_idx: int=0, to_idx: int=-1, fmt: PromptFmt=LLaMA2ChatPromptFmt, truncate_size: int=None): - if to_idx == -1 or to_idx >= len(self.pingpongs): - to_idx = len(self.pingpongs) - - results = fmt.ctx(self.ctx) - - for idx, pingpong in enumerate(self.pingpongs[from_idx:to_idx]): - results += fmt.prompt(pingpong, truncate_size=truncate_size) - - return results - -class GradioLLaMA2ChatPPManager(LLaMA2ChatPPManager): - def build_uis(self, from_idx: int=0, to_idx: int=-1, fmt: UIFmt=GradioChatUIFmt): - if to_idx == -1 or to_idx >= len(self.pingpongs): - to_idx = len(self.pingpongs) - - results = [] - - for pingpong in self.pingpongs[from_idx:to_idx]: - results.append(fmt.ui(pingpong)) - - return results - -async def gen_text( - prompt, - hf_model='meta-llama/Llama-2-70b-chat-hf', - hf_token=None, - parameters=None -): - if hf_token is None: - raise ValueError("Hugging Face Token is not set") - - if parameters is None: - parameters = { - 'max_new_tokens': 512, - 'do_sample': True, - 'return_full_text': False, - 'temperature': 1.0, - 'top_k': 50, - # 'top_p': 1.0, - 'repetition_penalty': 1.2 
- } - - url = f'https://api-inference.huggingface.co/models/{hf_model}' - headers={ - 'Authorization': f'Bearer {hf_token}', - 'Content-type': 'application/json' - } - data = { - 'inputs': prompt, - 'stream': True, - 'options': { - 'use_cache': False, - }, - 'parameters': parameters - } - - r = requests.post( - url, - headers=headers, - data=json.dumps(data), - stream=True - ) - - client = sseclient.SSEClient(r) - for event in client.events(): - yield json.loads(event.data)['token']['text'] - -def gen_text_none_stream( - prompt, - hf_model='meta-llama/Llama-2-70b-chat-hf', - hf_token=None, -): - parameters = { - 'max_new_tokens': 64, - 'do_sample': True, - 'return_full_text': False, - 'temperature': 0.7, - 'top_k': 10, - # 'top_p': 1.0, - 'repetition_penalty': 1.2 - } - - url = f'https://api-inference.huggingface.co/models/{hf_model}' - headers={ - 'Authorization': f'Bearer {hf_token}', - 'Content-type': 'application/json' - } - data = { - 'inputs': prompt, - 'stream': False, - 'options': { - 'use_cache': False, - }, - 'parameters': parameters - } - - r = requests.post( - url, - headers=headers, - data=json.dumps(data), - ) - - return json.loads(r.text)[0]["generated_text"] \ No newline at end of file diff --git a/spaces/whisper-event/whisper-demo/README.md b/spaces/whisper-event/whisper-demo/README.md deleted file mode 100644 index e5ad377bf894056e87aec47ff79be1a9af6b7159..0000000000000000000000000000000000000000 --- a/spaces/whisper-event/whisper-demo/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Whisper Demo -emoji: 🤫 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -tags: -- whisper-event ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/create_fft.py b/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/create_fft.py deleted file mode 100644 index cdb82425cabf442d8858b94eb392ac80e1b73a03..0000000000000000000000000000000000000000 --- a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/create_fft.py +++ /dev/null @@ -1,31 +0,0 @@ -import numpy as np -import os -import cv2 -import argparse - -ap = argparse.ArgumentParser() -ap.add_argument('--input_dir', '-i', required=True, help='Path to input dir for images') -ap.add_argument('--output_dir', '-o', required=True, help='Path to output dir to store files. 
Must be created') - -args= vars(ap.parse_args()) - - -folder = args['input_dir'] -folder_save = args['output_dir'] - - -labels = {} -images_done = 0 -for filename in os.listdir(folder): - img = cv2.imread(os.path.join(folder,filename)) - if img is not None: - img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - img_gray = np.float32(img_gray) / 255.0 - f = np.fft.fft2(img_gray) - fshift = np.fft.fftshift(f) - mag_spec = 20 * np.log(np.abs(fshift)) - mag_spec = np.asarray(mag_spec, dtype=np.uint8) - cv2.imwrite(os.path.join(folder_save,filename), mag_spec) - images_done += 1 - print("%s done"%images_done) - diff --git a/spaces/wz758727829/ChuanhuChatGPT/chat_func.py b/spaces/wz758727829/ChuanhuChatGPT/chat_func.py deleted file mode 100644 index 374178f3d22c5c23d1dc2952336cdc298a77315d..0000000000000000000000000000000000000000 --- a/spaces/wz758727829/ChuanhuChatGPT/chat_func.py +++ /dev/null @@ -1,456 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp - -from presets import * -from llama_func import * -from utils import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def get_response( - openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model -): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - history = [construct_system(system_prompt), *history] - - payload = { - "model": selected_model, - "messages": history, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - if stream: - timeout = timeout_streaming - else: - timeout = timeout_all - - # 获取环境变量中的代理设置 - http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy") - https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") - - # 如果存在代理设置,使用它们 - proxies = {} - if http_proxy: - logging.info(f"Using HTTP proxy: {http_proxy}") - proxies["http"] = http_proxy - if https_proxy: - logging.info(f"Using HTTPS proxy: {https_proxy}") - proxies["https"] = https_proxy - - # 如果有代理,使用代理发送请求,否则使用默认设置发送请求 - if proxies: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - proxies=proxies, - ) - else: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - ) - return response - - -def stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - def get_return_value(): - return chatbot, history, status_text, all_token_counts - - logging.info("实时回答模式") - partial_words = "" - counter = 0 - status_text = "开始实时传输回答……" - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - 
chatbot.append((inputs, "")) - user_token_count = 0 - if len(all_token_counts) == 0: - system_prompt_token_count = count_token(construct_system(system_prompt)) - user_token_count = ( - count_token(construct_user(inputs)) + system_prompt_token_count - ) - else: - user_token_count = count_token(construct_user(inputs)) - all_token_counts.append(user_token_count) - logging.info(f"输入token计数: {user_token_count}") - yield get_return_value() - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - True, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - yield get_return_value() - return - except requests.exceptions.ReadTimeout: - status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt - yield get_return_value() - return - - yield get_return_value() - error_json_str = "" - - for chunk in tqdm(response.iter_lines()): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - chunk = chunk.decode() - chunklength = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - logging.info(chunk) - error_json_str += chunk - status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}" - yield get_return_value() - continue - # decode each line as response data is in bytes - if chunklength > 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("一次性回答模式") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - all_token_counts[-1] = 
total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - if files: - msg = "构建索引中……(这可能需要比较久的时间)" - logging.info(msg) - yield chatbot, history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "索引构建完成,获取回答中……" - yield chatbot, history, msg, all_token_counts - history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot) - yield chatbot, history, status_text, all_token_counts - return - - old_inputs = "" - link_references = [] - if use_websearch: - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - web_results = [] - for idx, result in enumerate(search_results): - logging.info(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}') - link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n") - link_references = "\n\n" + "".join(link_references) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(web_results)) - ) - else: - link_references = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot, history, status_text, all_token_counts - return - - yield chatbot, history, "开始生成回答……", all_token_counts - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - for chatbot, history, status_text, all_token_counts in iter: - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if stream: - max_token = max_token_streaming - else: - max_token = max_token_all - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 
达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, - token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - ) - logging.info(f"chatbot: {chatbot}") - flag = False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - if flag: - chatbot = chatbot[:-1] - flag = True - history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"保留了最近{num_chat}轮对话" - yield chatbot, history, msg + "," + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("减少token数量完毕") \ No newline at end of file diff --git a/spaces/xdecoder/Demo/xdecoder/language/LangEncoder/__init__.py b/spaces/xdecoder/Demo/xdecoder/language/LangEncoder/__init__.py deleted file mode 100644 index ebc0a5d2e6bc4a4a93935450838acf09455004f6..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/xdecoder/language/LangEncoder/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from .build import build_lang_encoder -from .build import build_tokenizer - -from .transformer import * \ No newline at end of file diff --git a/spaces/xillegas/duolingo-bot/README.md b/spaces/xillegas/duolingo-bot/README.md deleted file mode 100644 index f11e78605844903e11a65e60cc96ac770de528e5..0000000000000000000000000000000000000000 --- a/spaces/xillegas/duolingo-bot/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: duolingobot -emoji: 📊 -colorFrom: purple -colorTo: indigo -sdk: docker -pinned: false -license: other -app_file: main.rb ---- - -# DuolingoBot -Duolingo Bot for Telegram -## Version 1.0 -Returns user experience points through /xp command as -```/xp myusername123```, the result is -> Hello telegramUser, the xp of myusername123 is 99999 -## Future improvements -- Add an user list of weekly experience -- Returns more statistics from an user diff --git a/spaces/xillegas/duolingo-bot/start.sh b/spaces/xillegas/duolingo-bot/start.sh deleted file mode 100644 index 9efcee9c5b1e89c7306a77aa9ce58caf49c2c98e..0000000000000000000000000000000000000000 --- a/spaces/xillegas/duolingo-bot/start.sh +++ /dev/null @@ -1,4 +0,0 @@ -# gem install sinatra-contrib - -bundle exec rackup --host 0.0.0.0 -p 7860 & -bundle exec ruby main.rb diff --git a/spaces/xin/PatentSolver/App/bin/ComplexParser.py b/spaces/xin/PatentSolver/App/bin/ComplexParser.py deleted 
file mode 100644 index 1ce5c5b81ddbcd3048235d870d827d8371abc2d7..0000000000000000000000000000000000000000 --- a/spaces/xin/PatentSolver/App/bin/ComplexParser.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Mon Nov 28 16:02:26 2016 - -@author: Achille Souili -""" -import re -import nltk - - - -class ComplexParser(object): - - def __init__(self, sentence): - self.sentence = sentence - - def extract_parameters(self): - sentence = self.sentence - concept = [] - - - words = nltk.word_tokenize(sentence) - sentence = nltk.pos_tag(words) - grammar = """CLAUSES: {
          ??
          <.*>??<.*>+} - """ - parameter_parser = nltk.RegexpParser(grammar) - tree = parameter_parser.parse(sentence) - for subtree in tree.subtrees(): - if subtree.label() == 'CLAUSES': - #print(subtree) - parameter_candidate = " ".join(word for word, tag in subtree.leaves()) - concept.append(parameter_candidate) - concept = "d".join(concept) - return concept - -if __name__ == "__main__": - - Paragraph = "in which the surface of diffusion (24) is concave." - words = nltk.word_tokenize(Paragraph) - tagged = nltk.pos_tag(words) - print(tagged) - get_parameter = ComplexParser(Paragraph) - parameters_list = get_parameter.extract_parameters() - - print (parameters_list) diff --git a/spaces/xnetba/MMS/vits/mel_processing.py b/spaces/xnetba/MMS/vits/mel_processing.py deleted file mode 100644 index 817f03756f64caf8cc54329a9325024c8fb9e0c3..0000000000000000000000000000000000000000 --- a/spaces/xnetba/MMS/vits/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) 
- if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py b/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py deleted file mode 100644 index c7bf0e51cac541f1872e1bc82ff359c3b2b2fdaa..0000000000000000000000000000000000000000 --- a/spaces/yangheng/Super-Resolution-Anime-Diffusion/Waifu2x/utils/image_quality.py +++ /dev/null @@ -1,191 +0,0 @@ -# Pytorch Multi-Scale Structural Similarity Index (SSIM) -# This code is written by jorge-pessoa (https://github.com/jorge-pessoa/pytorch-msssim) -# MIT licence -import math -from math import exp - -import torch -import torch.nn.functional as F -from torch.autograd import Variable - - -# +++++++++++++++++++++++++++++++++++++ -# SSIM -# ------------------------------------- - - -def gaussian(window_size, sigma): - gauss = torch.Tensor( - [ - exp(-((x - window_size // 2) ** 2) / float(2 * sigma**2)) - for x in range(window_size) - ] - ) - return gauss / gauss.sum() - - -def create_window(window_size, channel): - _1D_window = gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - window = Variable( - _2D_window.expand(channel, 1, window_size, window_size).contiguous() - ) - return window - - -def _ssim(img1, img2, window, window_size, channel, size_average=True, full=False): - padd = 0 - - mu1 = F.conv2d(img1, window, padding=padd, groups=channel) - mu2 = F.conv2d(img2, window, padding=padd, groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq - sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq - sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2 - - C1 = 0.01**2 - C2 = 0.03**2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ( - (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2) - ) - - v1 = 2.0 * sigma12 + C2 - v2 = sigma1_sq + sigma2_sq + C2 - cs = torch.mean(v1 / v2) - - if size_average: - ret = ssim_map.mean() - else: - ret = ssim_map.mean(1).mean(1).mean(1) - - if full: - return ret, cs - return ret - - -class SSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True): - super(SSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.window = create_window(window_size, self.channel) - - def 
forward(self, img1, img2): - (_, channel, _, _) = img1.size() - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = create_window(self.window_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return _ssim(img1, img2, window, self.window_size, channel, self.size_average) - - -def ssim(img1, img2, window_size=11, size_average=True, full=False): - (_, channel, height, width) = img1.size() - - real_size = min(window_size, height, width) - window = create_window(real_size, channel) - - if img1.is_cuda: - window = window.cuda(img1.get_device()) - window = window.type_as(img1) - - return _ssim(img1, img2, window, real_size, channel, size_average, full=full) - - -def msssim(img1, img2, window_size=11, size_average=True): - # TODO: fix NAN results - if img1.size() != img2.size(): - raise RuntimeError( - "Input images must have the same shape (%s vs. %s)." - % (img1.size(), img2.size()) - ) - if len(img1.size()) != 4: - raise RuntimeError( - "Input images must have four dimensions, not %d" % len(img1.size()) - ) - - weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=img1.dtype) - if img1.is_cuda: - weights = weights.cuda(img1.get_device()) - - levels = weights.size()[0] - mssim = [] - mcs = [] - for _ in range(levels): - sim, cs = ssim( - img1, img2, window_size=window_size, size_average=size_average, full=True - ) - mssim.append(sim) - mcs.append(cs) - - img1 = F.avg_pool2d(img1, (2, 2)) - img2 = F.avg_pool2d(img2, (2, 2)) - - mssim = torch.stack(mssim) - mcs = torch.stack(mcs) - return torch.prod(mcs[0 : levels - 1] ** weights[0 : levels - 1]) * ( - mssim[levels - 1] ** weights[levels - 1] - ) - - -class MSSSIM(torch.nn.Module): - def __init__(self, window_size=11, size_average=True, channel=3): - super(MSSSIM, self).__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = channel - - def forward(self, img1, img2): - # TODO: store window between calls if possible - return msssim( - img1, img2, window_size=self.window_size, size_average=self.size_average - ) - - -def calc_psnr(sr, hr, scale=0, benchmark=False): - # adapt from EDSR: https://github.com/thstkdgus35/EDSR-PyTorch - diff = (sr - hr).data - if benchmark: - shave = scale - if diff.size(1) > 1: - convert = diff.new(1, 3, 1, 1) - convert[0, 0, 0, 0] = 65.738 - convert[0, 1, 0, 0] = 129.057 - convert[0, 2, 0, 0] = 25.064 - diff.mul_(convert).div_(256) - diff = diff.sum(dim=1, keepdim=True) - else: - shave = scale + 6 - - valid = diff[:, :, shave:-shave, shave:-shave] - mse = valid.pow(2).mean() - - return -10 * math.log10(mse) - - -# +++++++++++++++++++++++++++++++++++++ -# PSNR -# ------------------------------------- -from torch import nn - - -def psnr(predict, target): - with torch.no_grad(): - criteria = nn.MSELoss() - mse = criteria(predict, target) - return -10 * torch.log10(mse) diff --git a/spaces/yefengzi/vits-models/transforms.py b/spaces/yefengzi/vits-models/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/yefengzi/vits-models/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - 
unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - 
cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/losses/stft_loss.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/losses/stft_loss.py deleted file mode 100644 index adb5767eb6e48b79c9811139091522cf635b5697..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/losses/stft_loss.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" - -import torch -import torch.nn.functional as F - - -def stft(x, fft_size, hop_size, win_length, window): - """Perform STFT and convert to magnitude spectrogram. - - Args: - x (Tensor): Input signal tensor (B, T). 
- fft_size (int): FFT size. - hop_size (int): Hop size. - win_length (int): Window length. - window (str): Window function type. - - Returns: - Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). - - """ - x_stft = torch.stft(x, fft_size, hop_size, win_length, window) - real = x_stft[..., 0] - imag = x_stft[..., 1] - - # NOTE(kan-bayashi): clamp is needed to avoid nan or inf - return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) - - -class SpectralConvergengeLoss(torch.nn.Module): - """Spectral convergence loss module.""" - - def __init__(self): - """Initilize spectral convergence loss module.""" - super(SpectralConvergengeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - - Returns: - Tensor: Spectral convergence loss value. - - """ - return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") - - -class LogSTFTMagnitudeLoss(torch.nn.Module): - """Log STFT magnitude loss module.""" - - def __init__(self): - """Initilize los STFT magnitude loss module.""" - super(LogSTFTMagnitudeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - - Returns: - Tensor: Log STFT magnitude loss value. - - """ - return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - - """ - self.window = self.window.to(x.device) - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window"): - """Initialize Multi resolution STFT loss module. - - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. 
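The multi-resolution loss defined above averages two terms per STFT resolution: spectral convergence (the relative Frobenius error between magnitude spectrograms) and an L1 distance between log magnitudes. Here is a self-contained sketch of those two terms at a single resolution, written against the modern complex-valued `torch.stft` API rather than the older real/imag layout used in the deleted file:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(2, 16000)   # predicted waveforms  (B, T)
y = torch.randn(2, 16000)   # reference waveforms  (B, T)

window = torch.hann_window(600)

def magnitude(sig):
    spec = torch.stft(sig, n_fft=1024, hop_length=120, win_length=600,
                      window=window, return_complex=True)
    return spec.abs().clamp(min=1e-7).transpose(2, 1)  # (B, frames, bins)

x_mag, y_mag = magnitude(x), magnitude(y)
sc_loss = torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
mag_loss = F.l1_loss(torch.log(y_mag), torch.log(x_mag))
print(float(sc_loss), float(mag_loss))
```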
- - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window)] - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. - - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/configuration_blip.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/configuration_blip.py deleted file mode 100644 index 39760a7e22a96d92fd372e3e435f09c44fd727a3..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/configuration_blip.py +++ /dev/null @@ -1,368 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Blip model configuration""" - -import os -from typing import Union - -from ...configuration_utils import PretrainedConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", - "Salesforce/blip-vqa-capfit-large": ( - "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" - ), - "Salesforce/blip-image-captioning-base": ( - "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" - ), - "Salesforce/blip-image-captioning-large": ( - "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" - ), - "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", - "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", - "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", - "Salesforce/blip-itm-large-flikr": ( - "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" - ), -} - - -class BlipTextConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a BLIP - text model according to the specified arguments, defining the model architecture. 
Instantiating a configuration - with the defaults will yield a similar configuration to that of the `BlipText` used by the [base - architectures](https://huggingface.co/Salesforce/blip-vqa-base). - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 30522): - Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by - the `inputs_ids` passed when calling [`BlipModel`]. - hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers and the pooler layer. - encoder_hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers from the vision model. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 8): - Number of attention heads for each attention layer in the Transformer encoder. - max_position_embeddings (`int`, *optional*, defaults to 77): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. - layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - hidden_dropout_prob (`float`, *optional*, defaults to 0.0): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - bos_token_id (`int`, *optional*, defaults to 30522): - The id of the `beginning-of-sequence` token. - eos_token_id (`int`, *optional*, defaults to 2): - The id of the `end-of-sequence` token. - pad_token_id (`int`, *optional*, defaults to 0): - The id of the `padding` token. - sep_token_id (`int`, *optional*, defaults to 102): - The id of the `separator` token. - is_decoder (`bool`, *optional*, defaults to `False`): - Whether the model is used as a decoder. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). 
- - Example: - - ```python - >>> from transformers import BlipTextConfig, BlipTextModel - - >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration - >>> configuration = BlipTextConfig() - - >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration - >>> model = BlipTextModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "blip_text_model" - - def __init__( - self, - vocab_size=30524, - hidden_size=768, - encoder_hidden_size=768, - intermediate_size=3072, - projection_dim=768, - num_hidden_layers=12, - num_attention_heads=8, - max_position_embeddings=512, - hidden_act="gelu", - layer_norm_eps=1e-12, - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - initializer_range=0.02, - bos_token_id=30522, - eos_token_id=2, - pad_token_id=0, - sep_token_id=102, - is_decoder=True, - use_cache=True, - **kwargs, - ): - super().__init__( - pad_token_id=pad_token_id, - bos_token_id=bos_token_id, - eos_token_id=eos_token_id, - sep_token_id=sep_token_id, - **kwargs, - ) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.encoder_hidden_size = encoder_hidden_size - self.intermediate_size = intermediate_size - self.projection_dim = projection_dim - self.hidden_dropout_prob = hidden_dropout_prob - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.max_position_embeddings = max_position_embeddings - self.layer_norm_eps = layer_norm_eps - self.hidden_act = hidden_act - self.initializer_range = initializer_range - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.is_decoder = is_decoder - self.use_cache = use_cache - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": - cls._set_token_in_kwargs(kwargs) - - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - - # get the text config dict if we are loading from BlipConfig - if config_dict.get("model_type") == "blip": - config_dict = config_dict["text_config"] - - if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: - logger.warning( - f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " - f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." - ) - - return cls.from_dict(config_dict, **kwargs) - - -class BlipVisionConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a - BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a - configuration defaults will yield a similar configuration to that of the Blip-base - [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers and the pooler layer. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
- num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - image_size (`int`, *optional*, defaults to 224): - The size (resolution) of each image. - patch_size (`int`, *optional*, defaults to 32): - The size (resolution) of each patch. - hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. - layer_norm_eps (`float`, *optional*, defaults to 1e-5): - The epsilon used by the layer normalization layers. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - - Example: - - ```python - >>> from transformers import BlipVisionConfig, BlipVisionModel - - >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration - >>> configuration = BlipVisionConfig() - - >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration - >>> model = BlipVisionModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "blip_vision_model" - - def __init__( - self, - hidden_size=768, - intermediate_size=3072, - projection_dim=512, - num_hidden_layers=12, - num_attention_heads=12, - image_size=384, - patch_size=16, - hidden_act="gelu", - layer_norm_eps=1e-5, - attention_dropout=0.0, - initializer_range=1e-10, - **kwargs, - ): - super().__init__(**kwargs) - - self.hidden_size = hidden_size - self.intermediate_size = intermediate_size - self.projection_dim = projection_dim - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.patch_size = patch_size - self.image_size = image_size - self.initializer_range = initializer_range - self.attention_dropout = attention_dropout - self.layer_norm_eps = layer_norm_eps - self.hidden_act = hidden_act - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": - cls._set_token_in_kwargs(kwargs) - - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - - # get the vision config dict if we are loading from BlipConfig - if config_dict.get("model_type") == "blip": - config_dict = config_dict["vision_config"] - - if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: - logger.warning( - f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " - f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." - ) - - return cls.from_dict(config_dict, **kwargs) - - -class BlipConfig(PretrainedConfig): - r""" - [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate - a BLIP model according to the specified arguments, defining the text model and vision model configs. 
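
Both sub-config `from_pretrained` overrides above follow the same pattern: when pointed at a checkpoint whose `config.json` is a full composite `blip` config, they pull out the matching `text_config` or `vision_config` dictionary instead of failing. A small round-trip sketch, assuming the stock `transformers` package (which ships the same classes as this vendored copy):

```python
import tempfile

from transformers import BlipConfig, BlipVisionConfig

with tempfile.TemporaryDirectory() as tmp:
    # Save a full BLIP config; its config.json carries model_type == "blip".
    BlipConfig().save_pretrained(tmp)

    # from_pretrained detects the composite config and extracts config_dict["vision_config"].
    vision_cfg = BlipVisionConfig.from_pretrained(tmp)
    print(vision_cfg.model_type)   # "blip_vision_model"
    print(vision_cfg.image_size)   # 384, the default set above
```
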
Instantiating - a configuration with the defaults will yield a similar configuration to that of the BLIP-base - [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - text_config (`dict`, *optional*): - Dictionary of configuration options used to initialize [`BlipTextConfig`]. - vision_config (`dict`, *optional*): - Dictionary of configuration options used to initialize [`BlipVisionConfig`]. - projection_dim (`int`, *optional*, defaults to 512): - Dimentionality of text and vision projection layers. - logit_scale_init_value (`float`, *optional*, defaults to 2.6592): - The inital value of the *logit_scale* paramter. Default is used as per the original BLIP implementation. - image_text_hidden_size (`int`, *optional*, defaults to 256): - Dimentionality of the hidden state of the image-text fusion layer. - kwargs (*optional*): - Dictionary of keyword arguments. - - Example: - - ```python - >>> from transformers import BlipConfig, BlipModel - - >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration - >>> configuration = BlipConfig() - - >>> # Initializing a BlipPModel (with random weights) from the Salesforce/blip-vqa-base style configuration - >>> model = BlipModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - - >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig - - >>> # Initializing a BLIPText and BLIPVision configuration - >>> config_text = BlipTextConfig() - >>> config_vision = BlipVisionConfig() - - >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision) - ```""" - - model_type = "blip" - - def __init__( - self, - text_config=None, - vision_config=None, - projection_dim=512, - logit_scale_init_value=2.6592, - image_text_hidden_size=256, - **kwargs, - ): - super().__init__(**kwargs) - - if text_config is None: - text_config = {} - logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.") - - if vision_config is None: - vision_config = {} - logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.") - - self.text_config = BlipTextConfig(**text_config) - self.vision_config = BlipVisionConfig(**vision_config) - - self.text_config.encoder_hidden_size = self.vision_config.hidden_size - - self.projection_dim = projection_dim - self.logit_scale_init_value = logit_scale_init_value - self.initializer_factor = 1.0 - self.initializer_range = 0.02 - self.image_text_hidden_size = image_text_hidden_size - - @classmethod - def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs): - r""" - Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model - configuration. 
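
One side effect of the `BlipConfig.__init__` shown above is easy to miss: whatever `encoder_hidden_size` the text config requests, the constructor overwrites it with the vision model's `hidden_size` so the cross-attention dimensions line up. A short sketch makes this visible (again assuming the stock `transformers` classes behave like this vendored copy):

```python
from transformers import BlipConfig

cfg = BlipConfig(
    vision_config={"hidden_size": 1024},
    text_config={"encoder_hidden_size": 512},  # will be overridden below
)

# __init__ ties the text encoder_hidden_size to the vision hidden_size.
print(cfg.vision_config.hidden_size)          # 1024
print(cfg.text_config.encoder_hidden_size)    # 1024, not 512
```
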
- - Returns: - [`BlipConfig`]: An instance of a configuration object - """ - - return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) diff --git a/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_depth.py b/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_depth.py deleted file mode 100644 index e7d71f5e9716c0c8d6d492e1ced2d600580f238f..0000000000000000000000000000000000000000 --- a/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/controlnet_depth.py +++ /dev/null @@ -1,176 +0,0 @@ -import gradio as gr -import numpy as np -import torch -from diffusers import ( - ControlNetModel, - StableDiffusionControlNetPipeline, - UniPCMultistepScheduler, -) -from PIL import Image -from transformers import pipeline - -stable_model_list = [ - "runwayml/stable-diffusion-v1-5", - "stabilityai/stable-diffusion-2-1", -] - -controlnet_depth_model_list = [ - "lllyasviel/sd-controlnet-depth", - "thibaud/controlnet-sd21-depth-diffusers", -] - - -stable_prompt_list = ["a photo of a man.", "a photo of a girl."] - -stable_negative_prompt_list = ["bad, ugly", "deformed"] - -data_list = [ - "data/test.png", -] - - -def controlnet_depth(image_path: str, depth_model_path: str): - depth_estimator = pipeline("depth-estimation") - - image = Image.open(image_path) - image = depth_estimator(image)["depth"] - image = np.array(image) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - image = Image.fromarray(image) - - controlnet = ControlNetModel.from_pretrained( - depth_model_path, torch_dtype=torch.float16 - ) - - return controlnet, image - - -def stable_diffusion_controlnet_depth( - image_path: str, - stable_model_path: str, - depth_model_path: str, - prompt: str, - negative_prompt: str, - guidance_scale: int, - num_inference_step: int, -): - - controlnet, image = controlnet_depth( - image_path=image_path, depth_model_path=depth_model_path - ) - - pipe = StableDiffusionControlNetPipeline.from_pretrained( - pretrained_model_name_or_path=stable_model_path, - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, - ) - - pipe.to("cuda") - pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - pipe.enable_xformers_memory_efficient_attention() - - output = pipe( - prompt=prompt, - image=image, - negative_prompt=negative_prompt, - num_inference_steps=num_inference_step, - guidance_scale=guidance_scale, - ).images - - return output[0] - - -def stable_diffusion_controlnet_depth_app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - controlnet_depth_image_file = gr.Image( - type="filepath", label="Image" - ) - - controlnet_depth_stable_model_id = gr.Dropdown( - choices=stable_model_list, - value=stable_model_list[0], - label="Stable Model Id", - ) - - controlnet_depth_model_id = gr.Dropdown( - choices=controlnet_depth_model_list, - value=controlnet_depth_model_list[0], - label="ControlNet Model Id", - ) - - controlnet_depth_prompt = gr.Textbox( - lines=1, value=stable_prompt_list[0], label="Prompt" - ) - - controlnet_depth_negative_prompt = gr.Textbox( - lines=1, - value=stable_negative_prompt_list[0], - label="Negative Prompt", - ) - - with gr.Accordion("Advanced Options", open=False): - controlnet_depth_guidance_scale = gr.Slider( - minimum=0.1, - maximum=15, - step=0.1, - value=7.5, - label="Guidance Scale", - ) - - controlnet_depth_num_inference_step = gr.Slider( - minimum=1, - maximum=100, - 
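
The Gradio wiring continues below; independent of the UI, the `stable_diffusion_controlnet_depth` function above can also be called directly. A sketch using the defaults listed at the top of the file — this assumes a CUDA GPU with enough memory, xformers installed, and that the Hub checkpoints can be downloaded:

```python
# Hypothetical direct invocation outside the Gradio app.
result = stable_diffusion_controlnet_depth(
    image_path="data/test.png",                          # sample image from data_list
    stable_model_path="runwayml/stable-diffusion-v1-5",
    depth_model_path="lllyasviel/sd-controlnet-depth",
    prompt="a photo of a man.",
    negative_prompt="bad, ugly",
    guidance_scale=7.5,
    num_inference_step=50,
)
result.save("controlnet_depth_output.png")               # result is a PIL image
```
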
step=1, - value=50, - label="Num Inference Step", - ) - - controlnet_depth_predict = gr.Button(value="Generator") - - with gr.Column(): - output_image = gr.Image(label="Output") - - gr.Examples( - fn=stable_diffusion_controlnet_depth, - examples=[ - [ - data_list[0], - stable_model_list[0], - controlnet_depth_model_list[0], - stable_prompt_list[0], - stable_negative_prompt_list[0], - 7.5, - 50, - ] - ], - inputs=[ - controlnet_depth_image_file, - controlnet_depth_stable_model_id, - controlnet_depth_model_id, - controlnet_depth_prompt, - controlnet_depth_negative_prompt, - controlnet_depth_guidance_scale, - controlnet_depth_num_inference_step, - ], - outputs=[output_image], - cache_examples=False, - label="ControlNet Depth Example", - ) - - controlnet_depth_predict.click( - fn=stable_diffusion_controlnet_depth, - inputs=[ - controlnet_depth_image_file, - controlnet_depth_stable_model_id, - controlnet_depth_model_id, - controlnet_depth_prompt, - controlnet_depth_negative_prompt, - controlnet_depth_guidance_scale, - controlnet_depth_num_inference_step, - ], - outputs=output_image, - ) diff --git a/spaces/zakiu/Personal-TTS/README.md b/spaces/zakiu/Personal-TTS/README.md deleted file mode 100644 index 4ce56ec74925011236e063b08a3e140f31dd00f2..0000000000000000000000000000000000000000 --- a/spaces/zakiu/Personal-TTS/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Personal TTS -emoji: 🐨 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: mit -duplicated_from: kevinwang676/Personal-TTS ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zxc314/vits-uma-genshin-honkai/mel_processing.py b/spaces/zxc314/vits-uma-genshin-honkai/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/zxc314/vits-uma-genshin-honkai/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, 
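
The `spectrogram_torch` definition carries over into the next chunk; the dynamic-range helpers above are self-contained, though, and a quick sanity check (a sketch assuming they are importable from this module) shows how the `clip_val` floor behaves:

```python
import torch

x = torch.tensor([0.0, 1e-6, 0.5, 1.0])
compressed = dynamic_range_compression_torch(x)     # log(clamp(x, min=1e-5) * C), C=1
restored = dynamic_range_decompression_torch(compressed)

# Values below clip_val come back as 1e-5; everything else round-trips.
print(restored)
```
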
return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec
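
To close out, a minimal sketch of calling `mel_spectrogram_torch` end to end. The parameter values below are typical VITS-style settings (22.05 kHz audio, 1024-point FFT, hop 256, 80 mel bins); the real values live in each model's JSON config, so treat these as assumptions. Note also that the positional `librosa_mel_fn(...)` call above expects an older librosa (before 0.10, when `mel` still accepted positional arguments).

```python
import torch

# One second of fake audio in [-1, 1), batch size 1.
wav = torch.rand(1, 22050) * 2 - 1

mel = mel_spectrogram_torch(
    wav,
    n_fft=1024,
    num_mels=80,
    sampling_rate=22050,
    hop_size=256,
    win_size=1024,
    fmin=0.0,
    fmax=None,          # None lets librosa default fmax to sampling_rate / 2
    center=False,
)

print(mel.shape)        # (1, 80, #frames), log-compressed via spectral_normalize_torch
```
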