diff --git a/spaces/1-13-am/neural-style-transfer/app.py b/spaces/1-13-am/neural-style-transfer/app.py deleted file mode 100644 index 5b47da80c190876b3384074b2a83e9440fd00bfa..0000000000000000000000000000000000000000 --- a/spaces/1-13-am/neural-style-transfer/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import gradio as gr -import torch -from utils import transformer, tensor_to_img -from network import Style_Transfer_Network - -check_point = torch.load("check_point1_0.pth", map_location = torch.device('cpu')) -model = Style_Transfer_Network() -model.load_state_dict(check_point['state_dict']) - -def style_transfer(content_img, style_strength, style_img_1 = None, iw_1 = 0, style_img_2 = None, iw_2 = 0, style_img_3 = None, iw_3 = 0, preserve_color = None): - transform = transformer(imsize = 512) - - content = transform(content_img).unsqueeze(0) - - iw = [iw_1, iw_2, iw_3] - interpolation_weights = [i/ sum(iw) for i in iw] - - style_imgs = [style_img_1, style_img_2, style_img_3] - styles = [] - for style_img in style_imgs: - if style_img is not None: - styles.append(transform(style_img).unsqueeze(0)) - if preserve_color == "None": preserve_color = None - elif preserve_color == "Whitening & Coloring": preserve_color = "whitening_and_coloring" - elif preserve_color == "Histogram matching": preserve_color = "histogram_matching" - with torch.no_grad(): - stylized_img = model(content, styles, style_strength, interpolation_weights, preserve_color = preserve_color) - return tensor_to_img(stylized_img) - -title = "Artistic Style Transfer" - -content_img = gr.components.Image(label="Content image", type = "pil") - -style_img_1 = gr.components.Image(label="Style images", type = "pil") -iw_1 = gr.components.Slider(0., 1., label = "Style 1 strength") -style_img_2 = gr.components.Image(label="Style images", type = "pil") -iw_2 = gr.components.Slider(0., 1., label = "Style 2 strength") -style_img_3 = gr.components.Image(label="Style images", type = "pil") -iw_3 = gr.components.Slider(0., 1., label = "Style 3 strength") -style_strength = gr.components.Slider(0., 1., label = "Adjust style strength") -preserve_color = gr.components.Dropdown(["None", "Whitening & Coloring", "Histogram matching"], label = "Choose color preserving mode") - -interface = gr.Interface(fn = style_transfer, - inputs = [content_img, - style_strength, - style_img_1, - iw_1, - style_img_2, - iw_2, - style_img_3, - iw_3, - preserve_color], - outputs = gr.components.Image(), - title = title - ) -interface.queue() -interface.launch(share = True, debug = True) \ No newline at end of file diff --git a/spaces/101-5/gpt4free/CONTRIBUTING.md b/spaces/101-5/gpt4free/CONTRIBUTING.md deleted file mode 100644 index 67aa60da1ce8322d31d71d9c8460f845f338bcde..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/CONTRIBUTING.md +++ /dev/null @@ -1,8 +0,0 @@ -gpt4free logo - -### Please, follow these steps to contribute: -1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40) -2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing) -3. Refractor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f) - -### We will be grateful to see you as a contributor! 
diff --git a/spaces/17TheWord/vits-models/text/cleaners.py b/spaces/17TheWord/vits-models/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/vits-models/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i7 Data Recovery Suite 4.4 Crack Download HERE ! -

Have you ever lost your important data due to accidental deletion, formatting, a virus attack, or any other reason? If yes, then you might be looking for a reliable and effective data recovery tool that can help you get back your lost files. One such tool is 7 Data Recovery Suite 4.4, a popular and powerful program that can recover data from various scenarios and devices.

-

However, the official version of 7 Data Recovery Suite 4.4 is not free, and you need to pay for a license to use its full features. That's why some people may try to find a crack version of the software online, which claims to offer the same functionality without any cost. But is it safe and legal to use a cracked version of 7 Data Recovery Suite 4.4? And how can you download and install it on your computer?

-

7 Data Recovery Suite 4.4 Crack Download HERE !


Download >>> https://byltly.com/2uKyCw



-

In this article, we will answer these questions and provide you with a detailed guide on how to use 7 Data Recovery Suite 4.4 Crack. We will also warn you about the potential risks of using a cracked software and suggest a better alternative for data recovery.

-

What is 7 Data Recovery Suite 4.4?

-

7 Data Recovery Suite 4.4 is a comprehensive data recovery program that can recover deleted, formatted, or lost data from hard disks, memory cards, flash drives, and other storage devices. It supports various file types, such as photos, videos, audio files, documents, emails, etc.

-

The software consists of four modules that can handle different data loss situations:

- -

Features of 7 Data Recovery Suite 4.4

-

Some of the main features of 7 Data Recovery Suite 4.4 are:

- -

Benefits of 7 Data Recovery Suite 4.4

-

Some of the benefits of using 7 Data Recovery Suite 4.4 are:

- -

How to download and install 7 Data Recovery Suite 4.4 Crack?

-

If you want to use the full features of 7 Data Recovery Suite 4.4 without paying for a license, you may be tempted to download and install a crack version of the software online. However, this is not recommended for several reasons that we will discuss later in this article.

-

If you still want to try it at your own risk, here are the steps to download and install 7 Data Recovery Suite 4.4 Crack:

-

How to get 7 Data Recovery Suite 4.4 Crack for free
-7 Data Recovery Suite 4.4 Crack full version download link
-Best data recovery software with 7 Data Recovery Suite 4.4 Crack
-7 Data Recovery Suite 4.4 Crack license key generator
-Download 7 Data Recovery Suite 4.4 Crack with serial key
-7 Data Recovery Suite 4.4 Crack activation code online
-Recover deleted files with 7 Data Recovery Suite 4.4 Crack
-7 Data Recovery Suite 4.4 Crack review and features
-7 Data Recovery Suite 4.4 Crack tutorial and guide
-7 Data Recovery Suite 4.4 Crack system requirements and compatibility
-Is 7 Data Recovery Suite 4.4 Crack safe and legit
-7 Data Recovery Suite 4.4 Crack alternatives and competitors
-Pros and cons of using 7 Data Recovery Suite 4.4 Crack
-How to update 7 Data Recovery Suite 4.4 Crack to the latest version
-How to uninstall 7 Data Recovery Suite 4.4 Crack completely
-How to fix errors and issues with 7 Data Recovery Suite 4.4 Crack
-How to backup and restore data with 7 Data Recovery Suite 4.4 Crack
-How to recover data from formatted or corrupted drives with 7 Data Recovery Suite 4.4 Crack
-How to recover data from SD card, USB flash drive, or external hard drive with 7 Data Recovery Suite 4.4 Crack
-How to recover data from Android or iOS devices with 7 Data Recovery Suite 4.4 Crack
-How to recover data from Windows or Mac computers with 7 Data Recovery Suite 4.4 Crack
-How to recover data from different file systems with 7 Data Recovery Suite 4.4 Crack
-How to recover data from various scenarios with 7 Data Recovery Suite 4.4 Crack
-How to recover photos, videos, audio, documents, emails, or other files with 7 Data Recovery Suite 4.4 Crack
-How to recover lost or forgotten passwords with 7 Data Recovery Suite 4.4 Crack
-How to recover data from encrypted or protected files with 7 Data Recovery Suite 4.4 Crack
-How to recover data from RAID arrays or partitions with 7 Data Recovery Suite 4.4 Crack
-How to recover data from cloud storage or online services with 7 Data Recovery Suite 4.4 Crack
-How to recover data from virtual machines or disks with 7 Data Recovery Suite 4.4 Crack
-How to recover data from optical discs or floppy disks with 7 Data Recovery Suite 4.4 Crack
-How to use advanced tools and settings in 7 Data Recovery Suite 4.4 Crack
-How to customize and optimize the performance of 7 Data Recovery Suite 4.4 Crack
-How to contact the support team of 7 Data Recovery Suite 4.4 Crack
-How to get a refund or exchange for the purchase of the product key of the software.

-

Download link

-

You can find many websites that offer a download link for 7 Data Recovery Suite 4.4 Crack online. However, you should be careful about the source and the authenticity of the file. Some websites may provide fake or malicious files that may harm your computer or steal your information.

-

One possible website that claims to provide a working download link for 7 Data Recovery Suite 4.4 Crack is https://kolompc.com/7-data-recovery-suite/. However, we cannot guarantee its safety or reliability.

-

Installation steps

-

After downloading the file from the website above or any other source, you need to follow these steps to install it on your computer:

-
    -
  1. Extract the ZIP file to a folder on your computer.
  2. Run the setup.exe file as administrator and follow the instructions to install the software.
  3. After installation, do not run the software yet.
  4. Copy the crack file from the folder and paste it into the installation directory of the software (usually C:\Program Files\7-DataRecoverySuite).
  5. Run the software and enjoy its full features.
-

How to use 7 Data Recovery Suite 4.4 Crack?

-

After installing the crack version of 7 Data Recovery Suite 4.4 on your computer, you can use it to recover your lost data by following these steps:

-

Select a recovery mode

-

Launch the software and select one of the four recovery modes according to your data loss situation:

- -

Scan the device or partition

-

Select the device or partition where you lost your data and click Next to start scanning for recoverable files. The scanning process may take some time depending on the size and condition of your device or partition.

-

Preview and recover the data

-

After scanning is completed, you can preview the found files by clicking on them in the left pane. You can also filter them by file type or path in the right pane. Select the files that you want to recover and click Recover to save them to a location of your choice on your computer or another device.

-

Risks of using 7 Data Recovery Suite 4.4 Crack

-

While using a crack version of 7 Data Recovery Suite 4.4 may seem tempting for some people who want to save money and enjoy its full features without paying for a license, there are also some serious risks involved in doing so:

-

Virus or malware infection

-

The crack file that you download online may contain viruses or malware that can infect your computer and damage your system files or programs. It may also steal your personal information or encrypt your data and demand a ransom for decryption.

-

Privacy breach

-

Legal issues

-

The use of cracked software is also illegal in most countries, as it violates the software copyright law. By using a cracked version of 7 Data Recovery Suite 4.4, you are infringing on the rights of the software developers and distributors who invested time and money to create and market the product.

-

You may face legal consequences if you are caught using or distributing cracked software, such as fines, lawsuits, or even imprisonment. Moreover, you may also lose your academic or professional reputation if you use cracked software for your research or work projects.

-

Conclusion

-

7 Data Recovery Suite 4.4 is a powerful and comprehensive data recovery software that can help you recover your lost data from various scenarios and devices. However, using a crack version of the software is not a wise choice, as it comes with many risks and disadvantages.

-

Using cracked software can expose your computer to virus or malware infection, breach your privacy and security, and cause legal issues for you and your organization. Moreover, using cracked software is unethical and unfair to the software developers and distributors who deserve to be compensated for their work.

-

Therefore, we recommend that you avoid using 7 Data Recovery Suite 4.4 Crack and look for a better alternative for data recovery. One such alternative is Recoverit, a reliable and professional data recovery tool that can recover data from various scenarios and devices with a high success rate and ease of use.

-

FAQs

- -

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Kung Fu Panda 2 Full Movie Download ) Discover the Legend of the Dragon Warrior.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Kung Fu Panda 2 Full Movie Download ) Discover the Legend of the Dragon Warrior.md deleted file mode 100644 index 02787c5454a7a1d79ba86c4addfd055419430f55..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Kung Fu Panda 2 Full Movie Download ) Discover the Legend of the Dragon Warrior.md +++ /dev/null @@ -1,134 +0,0 @@ - -

Kung Fu Panda 2: A Fun and Action-Packed Sequel

-

If you are looking for a movie that combines humor, adventure, emotion, and stunning animation, you might want to check out Kung Fu Panda 2. This film is the sequel to Kung Fu Panda (2008), which introduced us to Po, a clumsy but lovable panda who became the Dragon Warrior and saved China from the evil Tai Lung.

-

HD Online Player (Kung Fu Panda 2 Full Movie Download )


Download ✒ ✒ ✒ https://byltly.com/2uKxxH



-

In this film, Po faces a new challenge: Lord Shen, a peacock who has invented a weapon that can destroy kung fu and conquer China. Along with his friends, the Furious Five, Po must stop Shen before it is too late. But along the way, Po also discovers some secrets about his past and his true identity.

-

Kung Fu Panda 2 was released in 2011 by DreamWorks Animation and Paramount Pictures. It was directed by Jennifer Yuh Nelson, who became the first woman to solely direct an animated feature film from a major Hollywood studio. It was written by Jonathan Aibel and Glenn Berger, who also wrote the first film.

-

The film received critical acclaim for its story, characters, animation, music, and themes. It was nominated for an Academy Award for Best Animated Feature, losing to Rango. It also became the highest-grossing film directed by a woman until Frozen (2013), as well as the highest-grossing film solely directed by a woman until Wonder Woman (2017). It is also the sixth highest-grossing film of 2011, and the highest-grossing animated feature film of the year.

-

The Story of Kung Fu Panda 2

-

The film begins with a flashback that tells us how Lord Shen, the son of the peacock rulers of Gongmen City, became obsessed with using fireworks as a weapon. He learned of a prophecy that said he would be defeated by "a warrior of black and white". He then ordered his wolf army to kill all the pandas in China, hoping to prevent the prophecy from coming true.

-

Shen's parents were horrified by his actions and banished him from their city. Shen swore revenge and vowed to return with his weapon one day.

-

In the present day, Po is enjoying his life as the Dragon Warrior and the leader of the Furious Five: Tigress, Monkey, Viper, Crane, and Mantis. He is also learning more about kung fu from his mentor, Master Shifu.

-

One day, Po and his friends are sent to stop a group of wolf bandits who are stealing metal for Shen's weapon. Po has a flashback of his mother when he sees a symbol on one of the wolves' armor.

-

Po becomes curious about his past and asks his adoptive father, Mr. Ping, about where he came from.

-

Watch Kung Fu Panda 2 in HD quality online for free
-How to download Kung Fu Panda 2 full movie in HD
-Kung Fu Panda 2 streaming online with subtitles
-Best HD online player for Kung Fu Panda 2 movie
-Kung Fu Panda 2 full movie download link
-Kung Fu Panda 2 HD online player without ads
-Where to watch Kung Fu Panda 2 full movie online
-Kung Fu Panda 2 full movie HD download torrent
-Kung Fu Panda 2 online streaming HD quality
-Kung Fu Panda 2 full movie download in Hindi
-Kung Fu Panda 2 HD online player for Android
-Kung Fu Panda 2 full movie download in Tamil
-Kung Fu Panda 2 online watch HD free
-Kung Fu Panda 2 full movie download in Telugu
-Kung Fu Panda 2 HD online player for PC
-Kung Fu Panda 2 full movie download in Malayalam
-Kung Fu Panda 2 online HD with English subtitles
-Kung Fu Panda 2 full movie download in Kannada
-Kung Fu Panda 2 HD online player for iOS
-Kung Fu Panda 2 full movie download in Bengali
-Kung Fu Panda 2 online HD with Hindi dubbing
-Kung Fu Panda 2 full movie download in Marathi
-Kung Fu Panda 2 HD online player for Mac
-Kung Fu Panda 2 full movie download in Urdu
-Kung Fu Panda 2 online HD with Tamil dubbing
-Kung Fu Panda 2 full movie download in Gujarati
-Kung Fu Panda 2 HD online player for Windows
-Kung Fu Panda 2 full movie download in Punjabi
-Kung Fu Panda 2 online HD with Telugu dubbing
-Kung Fu Panda 2 full movie download in Nepali
-Kung Fu Panda 2 HD online player for Linux
-Kung Fu Panda 2 full movie download in Sinhala
-Kung Fu Panda 2 online HD with Malayalam dubbing
-Kung Fu Panda 2 full movie download in Indonesian
-Kung Fu Panda 2 HD online player for Chromebook
-Kung Fu Panda 2 full movie download in Filipino
-Kung Fu Panda 2 online HD with Kannada dubbing
-Kung Fu Panda 2 full movie download in Vietnamese
-Kung Fu Panda 2 HD online player for Roku
-Kung Fu Panda 2 full movie download in Thai
-Kung Fu Panda 2 online HD with Bengali dubbing
-Kung Fu Panda 2 full movie download in Arabic
-Kung Fu Panda 2 HD online player for Firestick
-Kung Fu Panda 2 full movie download in Persian
-Kung Fu Panda 2 online HD with Urdu dubbing
-Kung Fu Panda 2 full movie download in Turkish
-Kung Fu Panda 2 HD online player for Smart TV
-Kung Fu Panda 2 full movie download in Korean
-Kung Fu Panda 2 online HD with Gujarati dubbing

-

Mr. Ping tells him that he found him in a radish crate when he was a baby and decided to raise him as his son.

-

Po is not satisfied with this answer and decides to find out more about his origins.

-

He learns from Master Shifu that Shen has returned to Gongmen City with his weapon, which is a cannon that can fire metal balls with explosive force.

-

Po and his friends travel to Gongmen City to stop Shen.

-

There they meet two other kung fu masters who have been hiding from Shen: Master Ox and Master Croc.

-

They also encounter Shen's old nanny, a goat named Soothsayer, who can see the future.

-

Po tries to confront Shen several times but fails due to his flashbacks.

-

He eventually learns that Shen was responsible for killing his parents and destroying his village.

-

Po is devastated by this revelation but also determined to stop Shen once and for all.

-

He realizes that he must achieve inner peace in order to overcome his past trauma.

-

With the help of Soothsayer, Po meditates on his memories and accepts them as part of who he is.

-

He then leads his friends into a final battle against Shen and his army.

-

Po uses his kung fu skills to deflect Shen's cannonballs back at him.

-

He also tries to persuade Shen to let go of his hatred and find inner peace.

-

Shen refuses to listen and attacks Po with his blades.

-

Po dodges them but one of them cuts through Shen's cannon ropes, causing it to fall on him.

-

Shen is crushed by his own weapon while Po watches in sadness.

-

Po then returns to Mr. Ping's noodle shop with his friends.

-

He tells Mr. Ping that he knows he is not his biological father but he still loves him as his dad.

-

Mr. Ping hugs him and tells him that he loves him too.

-

The film ends with a scene showing that Po's biological father is still alive somewhere in China with other pandas.

-

The Characters of Kung Fu Panda 2

- - - - - - - - - - - - - - - - - -

The Animation and Music of Kung Fu Panda 2

-

The animation of Kung Fu Panda 2 is a remarkable achievement that combines computer animation and traditional animation techniques. The film uses computer animation for the main characters and the backgrounds, but also incorporates hand-drawn animation for some of the flashback scenes and the opening sequence. The hand-drawn animation gives the film a more stylized and artistic look that pays homage to Chinese painting and calligraphy.

-

The visual style of the film is also influenced by the culture and history of China. The film features various locations and landmarks that are based on real places in China, such as the Great Wall, the Forbidden City, and the Terracotta Army. The film also uses elements of Chinese mythology, such as dragons, phoenixes, and kites. The film also incorporates symbols and motifs that are relevant to the story and the characters, such as peacock feathers, lotus flowers, yin and yang, and fireworks.

-

The music of Kung Fu Panda 2 is composed by Hans Zimmer and John Powell, who also composed the music for the first film. The score blends orchestral music with traditional Chinese instruments, such as erhu, pipa, guzheng, dizi, and suona. The score also features themes and motifs that reflect the characters and their emotions, such as Po's theme, Shen's theme, and the inner peace theme. The score also includes some original songs by CeeLo Green and Jay Chou, who sing "Kung Fu Fighting" and "Nunchucks" respectively.

-

The Reception and Legacy of Kung Fu Panda 2

-

Kung Fu Panda 2 was released in theaters on May 26, 2011, in 2D, RealD 3D, Digital 3D and 4DX. The film received positive reviews from critics and audiences alike, who praised its story, characters, animation, music, and themes. The film holds an 81% approval rating on Rotten Tomatoes based on 179 reviews, with an average rating of 7/10. The website's critical consensus reads: "Kung Fu Panda 2 offers enough action, comedy, and visual sparkle to compensate for its somewhat familiar plot."

-

The film was also a commercial success, grossing $665.7 million worldwide against its $150 million budget. It became the highest-grossing film directed by a woman until Frozen (2013), as well as the highest-grossing film solely directed by a woman until Wonder Woman (2017). It is also the sixth highest-grossing film of 2011, and the highest-grossing animated feature film of the year.

-

The film was nominated for various awards, including an Academy Award for Best Animated Feature at the 84th Academy Awards, losing to Rango. It also received nominations from the Annie Awards, the Golden Globe Awards, the BAFTA Awards, the Critics' Choice Awards, and the Kids' Choice Awards.

-

The film had a significant impact on female directors and animators in Hollywood. Jennifer Yuh Nelson became the second woman to be nominated for an Academy Award for Best Animated Feature after Marjane Satrapi for Persepolis (2007). She also became one of the most successful female directors in terms of box office gross. She returned to direct Kung Fu Panda 3, which was co-directed by Alessandro Carloni.

-

The film also spawned a sequel and a franchise expansion. Kung Fu Panda 3 was released in January 2016 and continued Po's story as he reunited with his biological father and faced a new enemy. The film was also well-received by critics and audiences and grossed $521.2 million worldwide. A fourth film is currently in development at DreamWorks Animation.

-

Besides the films, the franchise also includes several short films, such as Kung Fu Panda: Secrets of the Masters, Kung Fu Panda: Secrets of the Scroll, and Kung Fu Panda: Secrets of the Furious Five. It also includes two television series: Kung Fu Panda: Legends of Awesomeness, which ran from 2011 to 2016 on Nickelodeon; and Kung Fu Panda: The Paws of Destiny, which premiered in 2018 on Amazon Prime Video.

-

Conclusion

-

Kung Fu Panda 2 is a fun and action-packed sequel that delivers on its promise of humor, adventure, emotion, and stunning animation. It is a film that can be enjoyed by children and adults alike, as it offers a compelling story, engaging characters, beautiful animation, and memorable music. It is a film that celebrates the art of kung fu, the culture of China, and the themes of family, destiny, and inner peace. It is a film that showcases the talents of its director, writers, voice actors, animators, and composers. It is a film that deserves to be watched and enjoyed by everyone.

-

To recap, the main characters and their voice actors are:

- Po (Jack Black): The Dragon Warrior and the leader of the Furious Five. He is a panda who loves kung fu, food, and fun. He is brave, loyal, optimistic, friendly, clumsy, and naive, but also smart when it matters most.
- Tigress (Angelina Jolie): Po's closest friend and a fierce fighter. She is a tiger who is strong, serious, disciplined, and stoic, and sometimes cold, but also caring deep down.
- Monkey (Jackie Chan): One of the Furious Five.

-

FAQs

-

Here are some frequently asked questions about Kung Fu Panda 2:

-
    -
  • Is Kung Fu Panda 2 based on a true story?
  • -

    No, Kung Fu Panda 2 is not based on a true story. It is a fictional story that takes place in a fantasy world of anthropomorphic animals who practice kung fu. However, the film does draw inspiration from real aspects of Chinese culture, history, and mythology.

    -
  • What is the name of Lord Shen's weapon?
  • -

    Lord Shen's weapon is a cannon that can fire metal balls with explosive force. It is based on the real invention of gunpowder and firearms in China during the Song dynasty (960-1279 CE).

    -
  • What is the meaning of Po's name?
  • -

    Po's name means "precious" or "treasure" in Chinese. It is also a homophone for the word "potato" in Mandarin, which is a reference to Po's chubby appearance and his love for food.

    -
  • What are the names of Po's biological parents?
  • -

    Po's biological father is Li Shan, a giant panda who lives in a hidden panda village in the mountains; Po's biological mother is not named in the films. Li Shan appears in Kung Fu Panda 3, where Po reunites with him and learns more about his heritage.

    -
  • How many films are there in the Kung Fu Panda franchise?
  • -

    There are currently three films in the Kung Fu Panda franchise: Kung Fu Panda (2008), Kung Fu Panda 2 (2011), and Kung Fu Panda 3 (2016). A fourth film is currently in development at DreamWorks Animation.

    -
-

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md deleted file mode 100644 index a74f4edae195dcef698f4b7b41d687c961f3a423..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md +++ /dev/null @@ -1,6 +0,0 @@ -

Ad-aware 6.0 Professional Keygen Serial Key


DOWNLOADhttps://imgfil.com/2uy1Ko



-
-If the product continues to prompt you for an activation code you have been unsuccessful. ... Try for Free Buy Now; Nessus Professional is for security pros on the front lines ... So now you are aware of the excellent features provided by Sage 50 ... I called HP and you can't get a real person. slmgr /ad-activation-get-IID (start ... 1fdad05405
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md deleted file mode 100644 index 5bcef2ad805aa9a93e044fd184fb6f8387ebbbe6..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md +++ /dev/null @@ -1,123 +0,0 @@ - -

CarX Drift Racing 2: The Ultimate Drifting Game for Android

-

If you are a fan of drifting games, you must have heard of CarX Drift Racing 2. This is one of the most popular and realistic drifting games for Android devices. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, and some frequently asked questions.

-

Introduction

-

What is CarX Drift Racing 2?

-

CarX Drift Racing 2 is the sequel to the original CarX Drift Racing game, which has over 100 million fans around the world. It is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car to make it slide sideways. Drifting is not only fun, but also challenging and rewarding, as it requires skill, precision, and timing.

-

carx drift racing 2 latest version apk


Download Filehttps://urlin.us/2uT12l



-

Why should you play CarX Drift Racing 2?

-

There are many reasons why you should play CarX Drift Racing 2, such as:

-
    -
  • It has stunning graphics and sound effects that make you feel like you are in a real drift car.
  • -
  • It has a variety of cars, tracks, and modes to choose from, so you will never get bored.
  • -
  • It has a realistic physics engine that simulates the behavior of different cars, surfaces, and weather conditions.
  • -
  • It has an online mode where you can drift with your friends or other players from around the world.
  • -
  • It has a visual auto tuning feature where you can customize your car's appearance and performance.
  • -
  • It has an XDS mode where you can practice tandem drifting with yourself or other players.
  • -
  • It has a TOP-32 mode where you can compete against the best drifters in the world.
  • -
-

Features of CarX Drift Racing 2

-

Online Rooms

-

This is a new feature that allows you to drift in real time with your friends or other players. You can create or join an online room, pick a location, drift, and earn points. You can also watch other players drift using the drone camera. You can earn valuable rewards for achieving different ranks in the online mode.

-

Visual Auto Tuning

-

This feature allows you to customize your car's appearance and performance. You can replace mirrors, lights, bumpers, rims, and many other parts. You can also create a unique image of your car with body kits, vinyls, stickers, and paint. You can express your creativity and style with this feature.

-

Improved Performance Tuning

-

This feature allows you to adjust your car's performance according to your preferences and needs. You can tune the suspension, springs, tyre pressure, wheel angle, engine, turbo pressure, gearbox, brakes, and differential. You can fine tune your car to achieve the best drifting results.

-

Realistic Racing Physics

-

This feature makes CarX Drift Racing 2 one of the most realistic drifting games on Android. The game uses a physics engine that simulates the behavior of different cars, surfaces, and weather conditions. You can see the smoke, dust, sparks, and tyre tracks that result from your drifting. You can also feel the difference between asphalt, grass, sand, and snow. You can also experience different weather conditions such as rain, fog, and sun. You can enjoy the realistic racing physics of this game.

-

XDS Mode

-

This feature allows you to practice tandem drifting with yourself or other players. Tandem drifting is a technique where two or more cars drift together in a synchronized manner. It is one of the most spectacular and difficult forms of drifting. In XDS mode, you can choose a leader car and a follower car, and try to match the leader's trajectory and angle. You can also switch roles and become the leader or the follower. You can improve your drifting skills and coordination with this feature.

-

TOP-32 Mode

-

This feature allows you to compete against the best drifters in the world. TOP-32 mode is a tournament mode where you have to qualify for the final round by beating 31 other opponents. You have to drift on different tracks and earn points based on your speed, angle, and line. You have to be fast, precise, and consistent to win this mode. You can earn fame and glory by becoming the champion of TOP-32 mode.

-

carx drift racing 2 apk download latest version
-carx drift racing 2 mod apk latest version
-carx drift racing 2 update apk latest version
-carx drift racing 2 online rooms apk latest version
-carx drift racing 2 android game apk latest version
-carx drift racing 2 free download apk latest version
-carx drift racing 2 unlimited money apk latest version
-carx drift racing 2 hack apk latest version
-carx drift racing 2 xds mode apk latest version
-carx drift racing 2 visual tuning apk latest version
-carx drift racing 2 performance tuning apk latest version
-carx drift racing 2 realistic physics apk latest version
-carx drift racing 2 tandem drifting apk latest version
-carx drift racing 2 top-32 mode apk latest version
-carx drift racing 2 new tracks apk latest version
-carx drift racing 2 new cars apk latest version
-carx drift racing 2 new features apk latest version
-carx drift racing 2 best settings apk latest version
-carx drift racing 2 tips and tricks apk latest version
-carx drift racing 2 cheats and codes apk latest version
-carx drift racing 2 gameplay and review apk latest version
-carx drift racing 2 offline and online apk latest version
-carx drift racing 2 multiplayer and singleplayer apk latest version
-carx drift racing 2 custom and stock cars apk latest version
-carx drift racing 2 vinyls and body kits apk latest version
-carx drift racing 2 leaderboards and rankings apk latest version
-carx drift racing 2 rewards and achievements apk latest version
-carx drift racing 2 challenges and missions apk latest version
-carx drift racing 2 events and tournaments apk latest version
-carx drift racing 2 skins and stickers apk latest version
-carx drift racing 2 sounds and music apk latest version
-carx drift racing 2 controls and steering apk latest version
-carx drift racing 2 graphics and animations apk latest version
-carx drift racing 2 bugs and fixes apk latest version
-carx drift racing 2 news and updates apk latest version
-carx drift racing 2 guide and walkthrough apk latest version
-carx drift racing 2 fun and addictive apk latest version
-carx drift racing 2 pro and beginner apk latest version
-carx drift racing 2 premium and free apk latest version
-carx drift racing 2 full and lite apk latest version

-

How to download and install CarX Drift Racing 2 APK OBB?

-

If you want to play CarX Drift Racing 2 on your Android device, you have to download and install the APK and OBB files. APK is the application package file that contains the game's code and resources. OBB is the data file that contains the game's graphics and sound files. Here are the steps to download and install CarX Drift Racing 2's APK and OBB files:
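An APK file is, under the hood, just a ZIP archive, so you can peek inside a downloaded file to sanity-check that it really is an Android package before you install anything. The short Python sketch below is only illustrative; the file path is a placeholder for wherever your browser saved the download.

```python
import zipfile

# Placeholder path; point this at the file you actually downloaded.
apk_path = "Downloads/carx-drift-racing-2.apk"

with zipfile.ZipFile(apk_path) as apk:
    names = apk.namelist()
    # Every valid APK contains a binary manifest and at least one .dex file of compiled code.
    print("Has AndroidManifest.xml:", "AndroidManifest.xml" in names)
    print("Has compiled code (.dex):", any(n.startswith("classes") and n.endswith(".dex") for n in names))
    print("Total entries:", len(names))
```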

-

Step 1: Download the APK and OBB files from a trusted source

-

You can find many websites that offer CarX Drift Racing 2 APK OBB files for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or fake files that can harm your device or steal your data. Therefore, you have to be careful and choose a trusted source to download the files. One of the best sources is [CarX Drift Racing 2 APK OBB], which provides the latest version of the game with high-quality graphics and sound.
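One practical way to check that a downloaded file has not been corrupted or swapped out is to compare its SHA-256 checksum with a checksum published by the site you chose to trust. A minimal sketch follows; the file name and the expected hash are placeholders, not real values for this game.

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Return the SHA-256 hex digest of a file, read in 1 MB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Both values below are placeholders for illustration only.
expected = "<hash published by the download site>"
actual = sha256_of("Downloads/carx-drift-racing-2.apk")
print("OK" if actual == expected else f"Checksum mismatch: got {actual}")
```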

-

Step 2: Enable unknown sources on your device

-

Before you can install the APK file, you have to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on.

-

Step 3: Install the APK file

-

After you have enabled unknown sources, you can install the APK file. To do this, locate the downloaded APK file on your device using a file manager app. Tap on it and follow the instructions on the screen to complete the installation.

-

Step 4: Extract and copy the OBB folder to Android/OBB

-

After you have installed the APK file, you have to extract and copy the OBB folder to Android/OBB on your device's internal storage. To do this, locate the downloaded OBB file on your device using a file manager app. Tap on it and select Extract Here or Extract To depending on your app. You will see a folder named com.carxtech.carxdr2. Copy this folder and paste it in Android/OBB on your device's internal storage.
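If you would rather do this step from a computer, the extracted OBB folder can also be pushed over USB with adb (Android Debug Bridge). This is only a sketch: it assumes adb is installed on the computer, USB debugging is enabled on the phone, and the device uses the standard /sdcard/Android/obb location.

```python
import subprocess

# Folder extracted from the downloaded OBB archive (folder name taken from the article).
local_obb_dir = "com.carxtech.carxdr2"
# Standard OBB location on most devices; adjust if yours differs.
device_obb_dir = "/sdcard/Android/obb/"

# Equivalent to running: adb push com.carxtech.carxdr2 /sdcard/Android/obb/
subprocess.run(["adb", "push", local_obb_dir, device_obb_dir], check=True)
```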

-

Step 5: Launch the game and enjoy

-

After you have copied the OBB folder, you are ready to launch the game and enjoy it. To do this, go to your app drawer and tap on the CarX Drift Racing 2 icon. The game will start and load the data from the OBB folder. You can now drift away with CarX Drift Racing 2.

-

Conclusion

-

CarX Drift Racing 2 is one of the best drifting games for Android devices. It has amazing graphics, realistic physics, online mode, visual auto tuning, XDS mode, TOP-32 mode, and many other features that make it fun and exciting. If you want to play this game, you have to download and install CarX Drift Racing 2 APK OBB files from a trusted source. Follow our guide above to do it easily and safely.

-

FAQs

-
    -
  • Is CarX Drift Racing 2 free?
  • -

    Yes, CarX Drift Racing 2 is free to download and play. However, it contains in-app purchases that allow you to buy coins, cars, and other items. You can also watch ads to earn free coins.

    -
  • What are the minimum requirements to play CarX Drift Racing 2?
  • -

    The minimum requirements to play CarX Drift Racing 2 are:

    -
      -
    • Android version 5.0 or higher
    • -
    • 2 GB of RAM or more
    • -
    • 1.5 GB of free storage space or more
    • -
    • A stable internet connection
    • -
    -
  • How can I get more coins in CarX Drift Racing 2?
  • -

    You can get more coins in CarX Drift Racing 2 by:

    -
      -
    • Drifting and earning points in the game modes
    • -
    • Completing daily tasks and achievements
    • -
    • Participating in online rooms and tournaments
    • -
    • Watching ads and videos
    • -
    • Buying them with real money
    • -
    -
  • How can I unlock more cars and tracks in CarX Drift Racing 2?
  • -

    You can unlock more cars and tracks in CarX Drift Racing 2 by:

    -
      -
    • Earning enough coins to buy them
    • -
    • Reaching certain levels and ranks in the game modes
    • -
    • Winning them as rewards or prizes in online rooms and tournaments
    • -
    • Buying them with real money
    • -
    -
  • How can I contact the developers of CarX Drift Racing 2?
  • -

    You can contact the developers of CarX Drift Racing 2 by:

    -
      -
    • Sending an email to support@carx-tech.com
    • -
    • Filling out the feedback form on their website [CarX Technologies]
    • -
    • Following them on their social media accounts [Facebook], [Twitter], [Instagram], [YouTube]
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md deleted file mode 100644 index 75e9beaf485be840d0cb80f471ba0741a37dd544..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md +++ /dev/null @@ -1,118 +0,0 @@ - -

    Dolphin Emulator: The Ultimate Guide for Android Users

    -

    Do you love playing Nintendo GameCube and Wii games? Do you wish you could play them on your Android device? If yes, then you are in luck. Dolphin Emulator is free, open-source software that allows you to do just that. In this article, we will show you how to download, install, configure, and use Dolphin Emulator on your Android device. We will also answer some of the frequently asked questions about this amazing app. Let's get started!

    -

    dolphin emulator v5 0 32 bit apk


    Download File ✒ ✒ ✒ https://urlin.us/2uSTlF



    -

    What is Dolphin Emulator and why you should use it

    -

    Dolphin Emulator is software that emulates the hardware and software of the Nintendo GameCube and Wii consoles. It enables you to play games from these consoles on your Android device, as well as on other platforms such as Windows, Linux, and macOS. Dolphin Emulator offers many features and benefits, such as:

    -
      -
    • High-resolution graphics: You can enjoy your games in HD quality, up to 1080p or even 4K, depending on your device capabilities.
    • -
    • Save states: You can save and load your game progress at any point, without relying on the in-game save system.
    • -
    • Cheats: You can use various cheat codes and hacks to modify your game experience, such as unlocking hidden items, increasing your health, or skipping levels.
    • -
    • Online multiplayer: You can play online with other Dolphin Emulator users, using the Nintendo Wi-Fi Connection service or the Netplay feature.
    • -
    • Customization: You can customize your emulator settings, such as graphics, audio, controls, enhancements, hacks, and more, to suit your preferences and device specifications.
    • -
    -

    Dolphin Emulator supports both 32-bit and 64-bit Android devices, but the 32-bit version has some limitations and compatibility issues. For example, the 32-bit version cannot run games that require more than 2 GB of RAM, such as The Legend of Zelda: Skyward Sword or Xenoblade Chronicles. The 32-bit version also has lower performance and stability than the 64-bit version. Therefore, if you have a 64-bit device, we recommend using the 64-bit version of Dolphin Emulator for a better gaming experience.
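If you are not sure which kind of device you have, one way to check from a computer is to ask the device for its primary ABI over adb: arm64-v8a or x86_64 means 64-bit, while armeabi-v7a or x86 means 32-bit. The sketch below assumes adb is installed and USB debugging is enabled; you can also find the same information in many device info apps.

```python
import subprocess

# Equivalent to running: adb shell getprop ro.product.cpu.abi
result = subprocess.run(
    ["adb", "shell", "getprop", "ro.product.cpu.abi"],
    capture_output=True, text=True, check=True,
)
abi = result.stdout.strip()
is_64_bit = abi in ("arm64-v8a", "x86_64")
print(f"Primary ABI: {abi} -> download the {'64-bit' if is_64_bit else '32-bit'} build")
```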

    -

    How to download and install Dolphin Emulator on your Android device

    -

    Downloading and installing Dolphin Emulator on your Android device is very easy and straightforward. You can follow these steps:

    -
      -
  1. You can download the latest version of Dolphin Emulator from the official website or from APKCombo. The official website has both the 32-bit and the 64-bit versions of the app, while APKCombo only has the 32-bit version. Make sure you download the correct version for your device.
  2. Before you install the APK file, you need to enable unknown sources in your device settings. To do this, go to Settings > Security > Unknown sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
  3. Once you have enabled unknown sources, you can install the APK file by tapping on it and following the instructions on the screen. You may need to grant some permissions to the app during the installation process. (If you prefer installing from a computer, see the adb sketch after this list.)
  4. You also need to have enough storage space on your device or SD card for the emulator and the games. The emulator itself takes about 15 MB of space, while the games can vary from a few hundred MB to several GB. You can check your available storage space by going to Settings > Storage.
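If you prefer installing from a computer rather than tapping the APK on the device, the same installation can be done with adb. This is a sketch only: the file name is a placeholder for whatever you downloaded, and it assumes adb is installed and USB debugging is enabled.

```python
import subprocess

# Placeholder file name; use the APK you actually downloaded.
apk_path = "dolphin-emu-android.apk"

# Equivalent to running: adb install -r dolphin-emu-android.apk
# The -r flag reinstalls over an existing copy while keeping its data.
subprocess.run(["adb", "install", "-r", apk_path], check=True)
```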
    -

    Congratulations! You have successfully installed Dolphin Emulator on your Android device. Now you are ready to configure it and play your favorite games.

    -

    dolphin emulator 5.0 download for android 32 bit
    -dolphin emulator 5.0 apk for 32 bit devices
    -dolphin emulator 5.0 32 bit android free
    -dolphin emulator 5.0 apk 32 bit no verification
    -dolphin emulator 5.0 apk for android 32 bit offline
    -dolphin emulator 5.0 apk for 32 bit phones
    -dolphin emulator 5.0 apk for android 32 bit latest version
    -dolphin emulator 5.0 apk for 32 bit mobiles
    -dolphin emulator 5.0 apk for android 32 bit mod
    -dolphin emulator 5.0 apk for 32 bit tablet
    -dolphin emulator 5.0 apk for android 32 bit online
    -dolphin emulator 5.0 apk for 32 bit windows
    -dolphin emulator 5.0 apk for android 32 bit x86
    -dolphin emulator 5.0 apk for 32 bit zip
    -dolphin emulator 5.0 apk for android 32 bit youtube
    -dolphin emulator v5.0 apk download for android (32-bit)
    -dolphin emulator v5.0 apk free download for android (32-bit)
    -dolphin emulator v5.0 apk latest version download for android (32-bit)
    -dolphin emulator v5.0 apk mod download for android (32-bit)
    -dolphin emulator v5.0 apk offline download for android (32-bit)
    -dolphin emulator v5.0 apk online download for android (32-bit)
    -dolphin emulator v5.0 apk update download for android (32-bit)
    -dolphin emulator v5.0 apk working download for android (32-bit)
    -dolphin emulator v5.0 apk x86 download for android (32-bit)
    -dolphin emulator v5.0 apk youtube download for android (32-bit)
    -how to install dolphin emulator v5.0 on android (32-bit)
    -how to use dolphin emulator v5.0 on android (32-bit)
    -how to play games on dolphin emulator v5.0 on android (32-bit)
    -how to fix lag on dolphin emulator v5.0 on android (32-bit)
    -how to configure dolphin emulator v5.0 on android (32-bit)
    -best settings for dolphin emulator v5.0 on android (32-bit)
    -best games for dolphin emulator v5.0 on android (32-bit)
    -best controller for dolphin emulator v5.0 on android (32-bit)
    -best roms for dolphin emulator v5.0 on android (32-bit)
    -best cheats for dolphin emulator v5.0 on android (32-bit)
    -is there a dolphin emulator v5.0 for android (32-bit)
    -is dolphin emulator v5.0 compatible with android (32-bit)
    -is dolphin emulator v5.0 safe for android (32-bit)
    -is dolphin emulator v5.0 legal for android (32-bit)
    -is dolphin emulator v5.0 worth it for android (32-bit)

    -

    How to configure Dolphin Emulator settings for optimal performance

    -

    Dolphin Emulator has a lot of settings that you can adjust to optimize its performance and compatibility with different games and devices. You can access the settings menu by tapping on the three dots icon in the top right corner of the emulator screen. You will see various tabs, such as graphics, audio, controls, enhancements, hacks, and more. You can tap on each tab to see and change the settings related to it. You can also create custom profiles for different games and devices by tapping on the plus icon in the top right corner of the settings menu.

    -

    Some of the settings that you should pay attention to are:

    -
      -
    • Graphics backend: This determines how Dolphin Emulator renders the graphics of the games. There are three options: OpenGL, Vulkan, and Software Renderer. OpenGL is the default option and works well with most games and devices. Vulkan is a newer option that may offer better performance and compatibility with some games, but it may also cause some issues with others. Software Renderer is a slow option that does not use hardware acceleration and should only be used for debugging purposes.
    • -
    • Aspect ratio: This determines how Dolphin Emulator displays the games on your screen. There are four options: Auto, Force 16:9, Force 4:3, and Stretch to Window. Auto is the default option and preserves the original aspect ratio of the game. Force 16:9 and Force 4:3 force the game to fit a widescreen or a standard screen respectively, which may result in some cropping or stretching of the image. Stretch to Window stretches the game to fill your entire screen, which may distort the image quality.
    • -
    • Internal resolution: This determines how Dolphin Emulator scales up or down the resolution of the game. There are several options, ranging from 1x (native) to 8x (8 times the native resolution). The higher the resolution, the better the image quality, but also the higher the performance requirements. The default option is 1x, which matches the original resolution of the game. You can increase the resolution if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not going beyond 2x resolution.
    • -
    • Anti-aliasing: This determines how Dolphin Emulator smooths out the edges of the game graphics. There are several options, ranging from None to 8x MSAA (multisample anti-aliasing). The higher the anti-aliasing, the smoother the edges, but also the higher the performance requirements. The default option is None, which means no anti-aliasing is applied. You can enable anti-aliasing if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not enabling anti-aliasing.
    • -
    • Anisotropic filtering: This determines how Dolphin Emulator enhances the quality of the game textures. There are several options, ranging from 1x to 16x. The higher the anisotropic filtering, the sharper the textures, but also the higher the performance requirements. The default option is 1x, which means no anisotropic filtering is applied. You can increase the anisotropic filtering if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not going beyond 4x anisotropic filtering.
    • -
    • Scaled EFB copy: This determines how Dolphin Emulator handles some special effects in the game, such as heat waves or water reflections. If this option is on, Dolphin Emulator will scale these effects according to your internal resolution setting. If this option is off, Dolphin Emulator will use the native resolution of the game for these effects. The default option is on, which means scaled EFB copy is applied. You can turn this option off if you want to improve your performance or compatibility with some games, but you may lose some visual quality.
    • -
    • Skip EFB access from CPU: This determines how Dolphin Emulator handles some advanced features in the game, such as motion blur or depth of field. If this option is on, Dolphin Emulator will skip these features and improve your performance. If this option is off, Dolphin Emulator will render these features and improve your visual quality. The default option is off, which means EFB access from CPU is not skipped. You can turn this option on if you want to improve your performance or compatibility with some games, but you may lose some visual quality.
    • -
    • Ignore format changes: This determines how Dolphin Emulator handles some color conversions in the game. If this option is on, Dolphin Emulator will ignore these conversions and improve your performance. If this option is off, Dolphin Emulator will perform these conversions and improve your visual quality. The default option is on, which means format changes are ignored. You can turn this option off if you want to improve your visual quality or compatibility with some games, but you may lose some performance.
    • -
    • Store EFB copies to texture only: This determines how Dolphin Emulator handles copies of the frame buffer made by the game. If this option is on, Dolphin Emulator will store these copies as textures on the GPU and improve your performance. If this option is off, Dolphin Emulator will also store these copies in RAM, which improves your compatibility with some games. The default option is on, which means EFB copies are stored as textures only. You can turn this option off if you want to improve your compatibility with some games, but you may lose some performance.
    • -
    • Texture cache accuracy: This determines how Dolphin Emulator handles some texture updates in the game. There are three options: Low, Medium, and High. The lower the accuracy, the faster the updates, but also the higher the chance of graphical glitches. The higher the accuracy, the slower the updates, but also the lower the chance of graphical glitches. The default option is Low, which means low accuracy texture cache is used. You can increase the accuracy if you want to reduce graphical glitches in some games, but you may lose some performance.
    • -
    • External frame buffer (XFB): This determines how Dolphin Emulator handles some video output in the game. There are two options: Disable and Virtual. If this option is Disable, Dolphin Emulator will bypass the XFB and improve your performance. If this option is Virtual, Dolphin Emulator will emulate the XFB and improve your compatibility with some games. The default option is Disable, which means XFB is disabled. You can enable this option if you want to improve your compatibility with some games, but you may lose some performance.
    • -
    • Fast depth calculation: This determines how Dolphin Emulator handles some depth calculations in the game. If this option is on, Dolphin Emulator will use a faster but less accurate method and improve your performance. If this option is off, Dolphin Emulator will use a slower but more accurate method and improve your visual quality. The default option is on, which means fast depth calculation is used. You can turn this option off if you want to improve your visual quality in some games, but you may lose some performance.
    • -
    • Disable bounding box: This determines how Dolphin Emulator handles some bounding box calculations in the game. If this option is on, Dolphin Emulator will skip these calculations and improve your performance. If this option is off, Dolphin Emulator will perform these calculations and improve your compatibility with some games. The default option is on, which means bounding box is disabled. You can turn this option off if you want to improve your compatibility with some games, but you may lose some performance.
    • -
    -

    These are some of the most important settings that you should consider when configuring Dolphin Emulator for 32-bit devices. However, you may need to experiment with different settings to find the best balance between performance and quality for your device and game. You can also check the Dolphin Wiki for more information and tips on specific games.
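    If you like keeping a record of a working profile, here is a minimal sketch of how you might write the conservative 32-bit settings described above into an INI-style file from a script. The section and key names below are illustrative assumptions, not Dolphin's exact configuration keys; check the graphics config file your own Dolphin build creates (for example, its GFX.ini) before reusing them.

```python
# Sketch: save a conservative "32-bit device" graphics profile as an INI file.
# Section/key names are assumptions; verify them against your Dolphin build.
from configparser import ConfigParser

cfg = ConfigParser()
cfg["Settings"] = {
    "InternalResolution": "2",   # stay at or below 2x native resolution
    "MSAA": "0",                 # anti-aliasing off
}
cfg["Enhancements"] = {
    "MaxAnisotropy": "2",        # modest anisotropic filtering (assumed to mean 4x)
}
cfg["Hacks"] = {
    "EFBScaledCopy": "True",     # scaled EFB copy on
    "EFBAccessEnable": "False",  # skip EFB access from CPU
    "EFBToTextureEnable": "True",# store EFB copies to texture only
    "BBoxEnable": "False",       # bounding box disabled
}

with open("GFX_32bit_profile.ini", "w") as f:
    cfg.write(f)
```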

    -

    How to load and play your favorite games on Dolphin Emulator

    -

    Now that you have installed and configured Dolphin Emulator on your Android device, you are ready to load and play your favorite games. Here are the steps to do so:

    -
      -
    1. You need to have the game files (ISO or GCM) on your device or SD card. You can get these files from your own GameCube or Wii discs, or from other sources online. However, you should only use game files that you own legally and that match the region of your device.
    2. -
    3. You can browse and select the game files from the emulator menu by tapping on the plus icon in the bottom right corner of the screen. This will open a file explorer where you can navigate to the folder where you stored your game files.
    4. -
    5. You can also scan your device for game files by tapping on the refresh icon in the top right corner of the screen. This will automatically detect and add any game files that are compatible with Dolphin Emulator.
    6. -
    7. Once you have loaded a game, you can start playing by tapping on the play icon in the bottom right corner of the screen. This will launch the game in full-screen mode.
    8. -
    9. You can also pause, save, load, or exit the game by tapping on the menu icon in the top left corner of the screen. This will open a menu where you can access various options, such as:
    10. -
        -
      • Save state: This allows you to save your game progress at any point in a slot. You can have up to 10 slots per game.
      • -
      • Load state: This allows you to load your game progress from a slot.
      • -
      • Exit: This allows you to quit the game and return to the emulator menu.
      • -
      -
    -
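    If you are curious what the "scan for game files" step does conceptually, the sketch below walks a folder and lists anything with a GameCube/Wii image extension. The folder path is an assumption for illustration; point it at wherever you actually store your games.

```python
# Sketch: list GameCube/Wii images (ISO or GCM) in a games folder, like a scan would.
from pathlib import Path

GAME_DIR = Path("/storage/emulated/0/GC")   # assumed location of your game files
GAME_EXTS = {".iso", ".gcm"}                 # formats mentioned above

games = sorted(p for p in GAME_DIR.rglob("*") if p.suffix.lower() in GAME_EXTS)
for game in games:
    size_mb = game.stat().st_size / (1024 ** 2)
    print(f"{game.name}  ({size_mb:.0f} MB)")
```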

    That's it! You can now enjoy playing GameCube and Wii games on your Android device with Dolphin Emulator.

    -

    How to troubleshoot common issues with Dolphin Emulator

    -

    Dolphin Emulator is a complex software that may not work perfectly with every device and game. Some of the common issues that you may encounter with Dolphin Emulator are:

    -
      -
    • The emulator crashes or freezes during gameplay.
    • -
    • The game runs too slow or too fast.
    • -
    • The game graphics are distorted or glitchy.
    • -
    • The game audio is choppy or missing.
    • -
    • The game controls are not responsive or accurate.
    • -
    -

    Some of the possible solutions for these issues are:

    -
      -
    • Update your device software and drivers: Make sure that your device is running on the latest version of Android and that your drivers are up to date. This may fix some compatibility and performance issues with Dolphin Emulator.
    • -
    • Clear your emulator cache and data: Sometimes, your emulator cache and data may get corrupted or outdated, causing some problems with Dolphin Emulator. You can clear them by going to Settings > Apps > Dolphin Emulator > Storage > Clear cache and Clear data. This may reset some of your emulator settings, so expect to configure them again; a scripted way to do the same over adb is sketched after this list.
    • Use a controller: You can use a controller to play games on Dolphin Emulator, as it will give you more accuracy and comfort than using the touchscreen. You can use a Bluetooth controller, a USB controller, or a Wii remote. You can configure your controller settings by going to Settings > Controls > Configure.
    • -
    • Use cheats and hacks: You can use cheats and hacks to modify your game experience on Dolphin Emulator, such as unlocking hidden items, increasing your health, or skipping levels. You can enable cheats and hacks by going to Settings > Enhancements > Cheats or Settings > Hacks.
    • -
    • Use online multiplayer: You can use online multiplayer to play games with other Dolphin Emulator users, using the Nintendo Wi-Fi Connection service or the Netplay feature. You can enable online multiplayer by going to Settings > Online > Wi-Fi or Settings > Online > Netplay.
    • -
    -
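    As mentioned in the list above, you can also clear the app's data from a PC over adb instead of tapping through the Settings app. This sketch assumes adb is installed, USB debugging is enabled on your device, and that the package name below matches your installed Dolphin build.

```python
# Sketch: wipe Dolphin's cache and data over adb. Note that "pm clear" removes
# both cache and data, so settings and save states stored there will be reset.
import subprocess

PACKAGE = "org.dolphinemu.dolphinemu"  # assumed package id for Dolphin on Android

subprocess.run(["adb", "shell", "pm", "clear", PACKAGE], check=True)
print(f"Cleared data for {PACKAGE}")
```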

    These are some of the solutions and tips that you can use to fix common issues and get more out of Dolphin Emulator. However, you may find other ways to enhance your gaming experience by experimenting with different settings and features.

    -

    -

    This is the end of the article. I hope you enjoyed reading it and learned something new. Thank you for your attention and have a nice day!

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md b/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md deleted file mode 100644 index ad3bc66e3b3954f91f5c477e6011330e2de0d47d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md +++ /dev/null @@ -1,159 +0,0 @@ - -

    AetherSX2 2023 Download: How to Play PS2 Games on Your Android Device

    -

    Do you miss playing your favorite PlayStation 2 games? Do you wish you could relive the nostalgia of playing classic PS2 titles on your Android device? If yes, then you are in luck. There is a new PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is called AetherSX2, and it is the best PS2 emulator for Android by far.

    -

    In this article, we will tell you everything you need to know about AetherSX2, including what it is, how it works, what features and benefits it offers, how to download and install it on your Android device, and how to play PS2 games on your Android device using AetherSX2. By the end of this article, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2.

    -




    -

    What is AetherSX2?

    -

    AetherSX2 is a PS2 emulator for Android that allows you to play PS2 games on your smartphone. An emulator is a software that mimics the hardware and software of another device, in this case, a PS2 console. By using an emulator, you can run games and applications that are designed for another platform, such as a PS2 game on an Android device.

    -

    A brief history of AetherSX2

    -

    AetherSX2 is the brainchild of one person, a developer who goes by the handle Tahlreth. The developer actually used the PCSX2 emulator as the basis for their Android-based emulator. PCSX2 is a long-running, well-established emulator on PC, so it makes sense to take advantage of the work that has gone into this program.

    -


    -

    The developer of AetherSX2 got the green light from the PCSX2 developers themselves to use their code, which is released under the LGPL license. This is unlike the DamonPS2 developers, who took the code without following the requisite license. In any event, the emulator was initially released in December 2021 via the Google Play Store as an open beta. You can also sideload the APK via the AetherSX2 website. We'd recommend you steer clear of any other websites claiming to offer the APK.

    -

    The AetherSX2 emulator is a major step forward for emulation on Android devices. It’s also worth noting that the app is free to download and use, so don’t be duped by anyone saying you need to pay for it. This is in contrast to the DamonPS2 emulator, which is filled to the brim with ads and charges for a Pro version limited to two devices.

    -

    Features and benefits of AetherSX2

    -

    AetherSX2 is not just another PS2 emulator for Android. It is a powerful and feature-rich emulator that offers many advantages over other emulators. Here are some of the features and benefits of AetherSX2 that make it stand out from the crowd.

    -

    High compatibility

    -

    AetherSX2 boasts high compatibility with a wide range of PS2 games from various genres and regions. You can play popular games like God of War, Final Fantasy, Grand Theft Auto, Metal Gear Solid, Kingdom Hearts, and many more on your Android device with AetherSX2. You can also play games from different regions, such as Japan, Europe, and North America, with the appropriate BIOS files. AetherSX2 supports both ISO and CSO formats for PS2 games.
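    If you are ever unsure whether a dump is a plain ISO or a compressed CSO, a quick look at its first bytes usually tells you. The sketch below assumes compressed images start with the "CISO" magic bytes, which is worth verifying against your own files.

```python
# Sketch: guess whether a PS2 dump is a compressed CSO or an uncompressed ISO
# by inspecting its magic bytes (the "CISO" header is an assumption to verify).
def detect_ps2_image(path: str) -> str:
    with open(path, "rb") as f:
        magic = f.read(4)
    if magic == b"CISO":
        return "CSO (compressed ISO)"
    return "ISO (or another uncompressed image)"

print(detect_ps2_image("game.cso"))
```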

    -

    Enhanced graphics

    -

    AetherSX2 does not just emulate the PS2 graphics faithfully, but also enhances them to make them look better on your Android device. You can adjust the resolution, aspect ratio, anti-aliasing, texture filtering, and other graphical settings to improve the visual quality of the games. You can also use shaders to add effects like scanlines, CRT, bloom, and more to the games. AetherSX2 supports both Vulkan and OpenGL renderers for graphics.

    -

    Save and load states

    -

    AetherSX2 allows you to save and load your game progress at any point with the save and load state feature. This is very convenient for playing on your Android device, as you can resume your game from where you left off without having to go through the in-game save system. You can also use this feature to skip difficult or boring parts of the game by loading a state from another source. AetherSX2 supports up to 10 save slots for each game.

    -

    Controller support

    -

    AetherSX2 lets you play PS2 games on your Android device with a variety of controllers. You can use the touchscreen controls that are customizable and responsive, or you can use an external controller that connects via Bluetooth or USB. AetherSX2 supports many popular controllers, such as Xbox One, PS4, PS3, Switch Pro, and more. You can also map the buttons and analog sticks to your liking.

    -

    Fast and smooth performance

    -

    AetherSX2 delivers fast and smooth performance for PS2 games on your Android device. You can play most games at full speed without any lag or stuttering. You can also tweak the performance settings to optimize the emulator for your device. You can adjust the frame rate, frame skip, speed hack, audio latency, and other options to improve the performance of the emulator. AetherSX2 runs well on most modern Android devices with decent hardware.

    -

    How to download and install AetherSX2 on your Android device

    -

    Now that you know what AetherSX2 is and what it can do, you might be wondering how to download and install it on your Android device. Well, it is very easy and simple to do so. Just follow these steps and you will be ready to play PS2 games on your smartphone in no time.

    -

    Step 1: Check system requirements

    -

    Before you download and install AetherSX2 on your Android device, you need to make sure that your device meets the minimum system requirements for running the emulator. Here are the system requirements for AetherSX2:

    -
      -
    • Android version: 5.0 or higher
    • -
    • CPU: Quad-core or higher (preferably with ARMv8 support)
    • -
    • GPU: Adreno 5xx or higher (or equivalent)
    • -
    • RAM: 3 GB or higher
    • -
    • Storage: At least 1 GB of free space (plus more for PS2 games)
    • -
    -

    If your device meets these requirements, then you can proceed to the next step. If not, then you might want to upgrade your device or look for another emulator.

    -

    Step 2: Download AetherSX2 APK from the official website or Google Play Store

    -

    The next step is to download the AetherSX2 APK file from a trusted source. There are two ways to do this: either from the official website or from the Google Play Store.

    -

    The official website of AetherSX2 is https://www.aethersx2.com/. Here you can find the latest version of the emulator as well as other information and updates about it. You can download the APK file directly from the website by clicking on the "Download" button on the homepage.

    -

    The Google Play Store is another option for downloading the AetherSX2 APK file. The Google Play Store is a safe and convenient way to download apps for your Android device. You can find the AetherSX2 app on the Google Play Store by searching for it or by following this link: https://play.google.com/store/apps/details?id=com.aethersx.aethersx&hl=en_US&gl=US. You can download the app by tapping on the "Install" button on the app page.

    -

    Either way, you will get the same APK file that is about 30 MB in size. Make sure you have enough space on your device before downloading it.
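    If you would rather grab the file from a PC and check it before copying it over, here is a rough sketch of a scripted download with a quick size sanity check. The URL below is a placeholder, not the real download link; always take the actual link from the official website or the Play Store as described above.

```python
# Sketch: download the APK and report its size (expected to be roughly 30 MB).
import os
import requests

APK_URL = "https://example.com/AetherSX2.apk"  # placeholder, replace with the official link
OUT_FILE = "AetherSX2.apk"

resp = requests.get(APK_URL, stream=True, timeout=60)
resp.raise_for_status()
with open(OUT_FILE, "wb") as f:
    for chunk in resp.iter_content(chunk_size=1024 * 1024):
        f.write(chunk)

size_mb = os.path.getsize(OUT_FILE) / (1024 * 1024)
print(f"Downloaded {size_mb:.1f} MB")
```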

    -

    Step 3: Install AetherSX2 APK on your Android device

    -

    Once you have downloaded the AetherSX2 APK file, you need to install it on your Android device. To do this, you need to enable the installation of apps from unknown sources on your device. This is a security feature that prevents malicious apps from being installed on your device without your permission. Here is how to enable it:

    -
      -
    • Go to the Settings app on your device.
    • -
    • Tap on Security or Privacy (depending on your device).
    • -
    • Find and enable the option that says "Unknown sources" or "Install unknown apps" (depending on your device).
    • -
    • Confirm your choice by tapping OK or Allow (depending on your device).
    • -
    -

    Now you can install the AetherSX2 APK file by following these steps:

    -
      -
    • Locate the AetherSX2 APK file on your device using a file manager app or the Downloads app.
    • -
    • Tap on the AetherSX2 APK file to start the installation process.
    • -
    • Follow the instructions on the screen to complete the installation.
    • -
    • Wait for the installation to finish and then tap Open or Done (depending on your device).
    • -
    -

    Congratulations, you have successfully installed AetherSX2 on your Android device. You can now launch the app from your app drawer or home screen.

    -

    Step 4: Load PS2 games on your Android device

    -

    The next step is to load PS2 games on your Android device. You can do this by either transferring PS2 games from your PC or downloading PS2 games from the internet. Here is how to do both:

    -

    Transfer PS2 games from your PC

    -

    If you have PS2 games on your PC, you can transfer them to your Android device using a USB cable or a wireless method. Here is how to do it using a USB cable:

    -
      -
    • Connect your Android device to your PC using a USB cable.
    • -
    • Select the option that says "File Transfer" or "MTP" (depending on your device) on your Android device.
    • -
    • Open the File Explorer or My Computer app on your PC and find your Android device.
    • -
    • Create a folder named "PS2" on your Android device's internal storage or SD card (depending on where you want to store the games).
    • -
    • Copy and paste the PS2 games from your PC to the PS2 folder on your Android device. The games should be in ISO or CSO format.
    • -
    • Eject your Android device from your PC and disconnect the USB cable.
    • -
    -

    If you want to use a wireless method, you can use an app like AirDroid or ShareIt to transfer files between your PC and Android device over Wi-Fi. Just follow the instructions of the app you choose to use.
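    If you already use adb for other things, you can also push your game images over USB from a script instead of copying them by hand. This is only a sketch: it assumes adb is installed, USB debugging is enabled, and that the device folder below is where you want to keep your games.

```python
# Sketch: copy ISO/CSO images from a PC folder to the phone's PS2 folder via adb.
import subprocess
from pathlib import Path

LOCAL_GAMES = Path("C:/PS2")            # assumed folder on your PC with game files
DEVICE_DIR = "/storage/emulated/0/PS2"  # assumed folder on the phone

subprocess.run(["adb", "shell", "mkdir", "-p", DEVICE_DIR], check=True)
for game in LOCAL_GAMES.glob("*"):
    if game.suffix.lower() in {".iso", ".cso"}:
        subprocess.run(["adb", "push", str(game), DEVICE_DIR], check=True)
        print(f"Copied {game.name}")
```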

    -

    Download PS2 games from the internet

    -

    If you don't have PS2 games on your PC, you can download them from the internet. However, you need to be careful about where you download them from, as some websites may contain viruses, malware, or fake files. You also need to make sure that you own the original PS2 games that you download, as downloading pirated games is illegal and unethical.

    -

    We recommend that you use reputable and trusted websites that offer PS2 games for download, such as Emuparadise, CoolROM, RomHustler, and The ISO Zone. These websites have a large collection of PS2 games from various regions and genres that you can download for free. Here is how to download PS2 games from these websites:

    -
      -
    • Go to the website of your choice using a web browser app on your Android device.
    • -
    • Search for the PS2 game that you want to download using the search bar or browse through the categories.
    • -
    • Select the game that you want to download and tap on the download link or button.
    • -
    • Wait for the download to finish and then locate the downloaded file using a file manager app or the Downloads app.
    • -
    -

    Note: Some websites may require you to extract the downloaded file using an app like ZArchiver or RAR before you can play it. If this is the case, just follow these steps:

    -
      -
    • Select the downloaded file and tap on Extract here or Extract to (depending on the app).
    • -
    • Wait for the extraction to finish and then delete the original file to save space.
    • -
    • Find the extracted file, which should be in ISO or CSO format, and move it to the PS2 folder on your Android device.
    • -
    -

    Now you have PS2 games on your Android device that you can play with AetherSX2.

    -

    Step 5: Configure settings and controls according to your preference

    -

    The last step before you can play PS2 games on your Android device with AetherSX2 is to configure the settings and controls according to your preference. AetherSX2 has many options that you can customize to enhance your gaming experience. Here are some of the settings and controls that you can configure:

    -

    Settings

    -

    To access the settings menu, tap on the three-dot icon on the top right corner of the app and select Settings. Here you can find various options that affect the performance, graphics, audio, and input of the emulator. Some of the options that you can adjust are:

    -
      -
    • Frame rate: You can set the frame rate limit for the emulator, which affects the speed of the games. You can choose between 30 FPS, 60 FPS, or Unlimited.
    • -
    • Frame skip: You can enable or disable frame skipping, which is a technique that skips rendering some frames to improve performance. You can also set the frame skip value from 0 to 9.
    • -
    • Speed hack: You can enable or disable speed hack, which is a feature that boosts the speed of the emulator by reducing the CPU load. You can also set the speed hack value from 0 to 9.
    • -
    • Audio latency: You can set the audio latency for the emulator, which affects the synchronization of the sound and the video. You can choose between Low, Medium, or High.
    • -
    • Graphics renderer: You can choose between Vulkan or OpenGL as the graphics renderer for the emulator, which affects the quality and compatibility of the graphics. Vulkan is recommended for better performance and compatibility.
    • -
    • Resolution: You can set the resolution for the emulator, which affects the sharpness and clarity of the graphics. You can choose between Native, 2x Native, 3x Native, or 4x Native. Higher multiples of the PS2's native resolution look sharper but demand more from your device.
    • -
    • Aspect ratio: You can set the aspect ratio for the emulator, which affects how the games are displayed on your screen. You can choose between Auto (based on game), 4:3 (standard), or 16:9 (widescreen).
    • -
    • Anti-aliasing: You can enable or disable anti-aliasing, which is a technique that smooths out jagged edges in the graphics. You can also set the anti-aliasing level from 2x to 16x.
    • -
    • Texture filtering: You can enable or disable texture filtering, which is a technique that improves the quality of textures in the graphics. You can also set the texture filtering level from Bilinear to Anisotropic 16x.
    • -
    • Shaders: You can enable or disable shaders, which are effects that add visual enhancements to the graphics. You can also choose from a variety of shaders, such as Scanlines, CRT, Bloom, and more.
    • -
    -

    You can experiment with different settings and see what works best for you and your device. You can also reset the settings to default by tapping on the Reset button at the bottom of the menu.

    -

    Controls

    -

    To access the controls menu, tap on the three-dot icon on the top right corner of the app and select Controls. Here you can find various options that affect the input and layout of the emulator. Some of the options that you can adjust are:

    -
      -
    • Touchscreen controls: You can enable or disable the touchscreen controls, which are virtual buttons and analog sticks that appear on your screen. You can also customize the size, position, opacity, and vibration of the touchscreen controls.
    • -
    • External controller: You can enable or disable the external controller, which is a physical controller that connects to your device via Bluetooth or USB. You can also map the buttons and analog sticks of the external controller to the PS2 controller.
    • -
    • Accelerometer: You can enable or disable the accelerometer, which is a sensor that detects the tilt and motion of your device. You can also map the accelerometer to the PS2 controller.
    • -
    • Gyroscope: You can enable or disable the gyroscope, which is a sensor that detects the orientation and rotation of your device. You can also map the gyroscope to the PS2 controller.
    • -
    -

    You can experiment with different controls and see what works best for you and your device. You can also reset the controls to default by tapping on the Reset button at the bottom of the menu.

    -

    How to play PS2 games on your Android device using AetherSX2

    -

    Now that you have downloaded and installed AetherSX2 on your Android device, loaded PS2 games on your device, and configured the settings and controls according to your preference, you are ready to play PS2 games on your smartphone with AetherSX2. Here is how to do it:

    -

    Choose a game from the game list or browse for a game file

    -

    When you launch AetherSX2, you will see a game list that shows all the PS2 games that you have on your device. You can scroll through the game list and tap on any game that you want to play. The game will start loading automatically.

    -

    If you don't see the game that you want to play on the game list, you can browse for it manually by tapping on the folder icon on the top left corner of the app. This will open a file browser that lets you navigate through your device's storage. You can find and select any PS2 game file that you have on your device in ISO or CSO format. The game will start loading automatically.

    -

    Select a graphics renderer (Vulkan or OpenGL)

    -

    Before the game starts, you will be asked to select a graphics renderer for the emulator. You can choose between Vulkan or OpenGL as the graphics renderer. Vulkan is recommended for better performance and compatibility, while OpenGL is recommended for older devices or games that have issues with Vulkan. You can change this option later in the settings menu if you want.

    -

    Enjoy playing PS2 games on your Android device with AetherSX2

    -

    After selecting a graphics renderer, the game will start running on your Android device with AetherSX2. You can use the touchscreen controls or the external controller to play the game as you would on a PS2 console. You can also access the emulator menu by tapping on the three-dot icon on the top right corner of the app. Here you can save and load states, change settings and controls, pause and resume the game, and exit the game.

    -

    That's it. You can now enjoy playing PS2 games on your Android device with AetherSX2. You can play as many games as you want, as long as you have enough space on your device. You can also switch between different games by going back to the game list or the file browser.

    -

    Conclusion

    -

    AetherSX2 is a PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is a powerful and feature-rich emulator that offers high compatibility, enhanced graphics, save and load states, controller support, fast and smooth performance, and more. It is also free to download and use, unlike some other emulators that charge money or show ads.

    -

    In this article, we have shown you how to download and install AetherSX2 on your Android device, how to load PS2 games on your device, how to configure settings and controls according to your preference, and how to play PS2 games on your device using AetherSX2. By following these steps, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2.

    -

    We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

    -

    FAQs

    -

    Here are some frequently asked questions about AetherSX2 and PS2 emulation on Android.

    -

    Q: Is AetherSX2 legal?

    -

    A: AetherSX2 is legal as long as you use it for personal and non-commercial purposes. You also need to own the original PS2 games that you play with AetherSX2, as downloading pirated games is illegal and unethical.

    -

    Q: Is AetherSX2 safe?

    -

    A: AetherSX2 is safe as long as you download it from the official website or the Google Play Store. You also need to be careful about where you download PS2 games from, as some websites may contain viruses, malware, or fake files.

    -

    Q: How can I update AetherSX2?

    -

    A: You can update AetherSX2 by downloading the latest version of the APK file from the official website or the Google Play Store. You can also enable automatic updates for AetherSX2 on the Google Play Store by tapping on the three-dot icon on the app page and selecting Enable auto-update.

    -

    Q: How can I report bugs or issues with AetherSX2?

    -

    A: You can report bugs or issues with AetherSX2 by contacting the developer via email at aethersx@gmail.com. You can also join the official Discord server of AetherSX2 at https://discord.gg/6J9f8wM. Here you can chat with other users and get support from the developer and moderators.

    -

    Q: How can I support the development of AetherSX2?

    -

    A: You can support the development of AetherSX2 by donating to the developer via PayPal at https://www.paypal.me/aethersx. You can also share your feedback and suggestions with the developer via email or Discord. You can also rate and review AetherSX2 on the Google Play Store and spread the word about it to your friends and family.

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md b/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md deleted file mode 100644 index 768eca16718854da5aa78e9bcd90a94223558767..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md +++ /dev/null @@ -1,208 +0,0 @@ - -

    How to Download a 1 MB PDF File in Minutes

    -

    PDF files are one of the most popular and versatile document formats in the digital world. They can contain text, images, graphics, links, forms, annotations, and more. They can also preserve the layout and appearance of your document across different devices and platforms.

    -




    -

    But sometimes, you might need to download a small PDF file that is only 1 MB or less in size. Maybe you have a limited bandwidth or storage space on your device. Maybe you want to save time and data when downloading a document. Maybe you need to send or receive a document via email or messaging app that has a file size limit.

    -

    Whatever your reason is, downloading a 1 MB PDF file is not as hard as you might think. In this article, we will show you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device.

    -

    What is a PDF File?

    -

    PDF stands for Portable Document Format. It is a file format that was created by Adobe in 1993 to enable users to share and print documents without losing the original formatting. PDF files can be opened by various software and apps, such as Adobe Acrobat Reader, Google Chrome, Microsoft Edge, and more.

    -

    Some of the benefits of PDF files over other formats are:

    -
      -
    • They are compatible with different operating systems and devices
    • -
    • They can protect the content and integrity of your document with encryption and passwords
    • -
    • They can compress large amounts of data without compromising the quality
    • -
    • They can support interactive features such as hyperlinks, bookmarks, annotations, and forms
    • -
    • They can be easily converted to or from other formats such as Word, Excel, PowerPoint, JPG, PNG, and more
    • -
    -

    Why Do You Need to Download a 1 MB PDF File?

    -

    There are many scenarios where you might need to download a small PDF file that is only 1 MB or less in size. For example:

    -


    -
      -
    • You want to download a short article, report, brochure, flyer, or resume that is available online as a PDF file
    • -
    • You want to download a sample or preview of a longer document or book that is offered as a PDF file
    • -
    • You want to download a form or application that you need to fill out and submit as a PDF file
    • -
    • You want to download a certificate, receipt, invoice, or ticket that is issued as a PDF file
    • -
    • You want to download a coupon, voucher, or discount code that is provided as a PDF file
    • -
    -

    However, downloading a small PDF file might not always be easy or convenient. Sometimes, you might encounter some challenges or limitations when trying to download a large PDF file. For example:

    -
      -
    • You have a slow or unstable internet connection that makes downloading large files take too long or fail
    • -
    • You have a limited data plan or quota that makes downloading large files consume too much data or incur extra charges
    • -
    • You have a low storage space or memory on your device that makes downloading large files impossible or cause errors
    • -
    • You have a strict firewall or antivirus software that blocks or restricts downloading large files from unknown sources
    • -
    • You have a file size limit or restriction on your email or messaging app that prevents you from sending or receiving large files as attachments
    • -
    -

    How to Download a 1 MB PDF File from the Internet

    -

    If you need to download a 1 MB PDF file from the internet, you need to find and access a 1 MB PDF file online first. There are many sources or websites that offer free or low-cost PDF files for various purposes and topics. Some of them are:

    -
      -
    • PDF Drive: A free online library that has over 90 million PDF files for free download
    • -
    • PDF Books World: A free online platform that has thousands of PDF books for free download
    • -
    • PDF Candy: A free online tool that has hundreds of PDF templates for free download
    • -
    • PDF Zone: A free online resource that has dozens of PDF guides and tutorials for free download
    • -
    • PDF Archive: A free online archive that has millions of PDF documents for free download
    • -

    Once you find a 1 MB PDF file that you want to download, you need to download and save it to your device. The steps may vary depending on the source or website, but generally, they are:

    -
      -
    1. Click on the PDF file link or icon to open it in your browser or software
    2. -
    3. Look for a download button or option on the page or toolbar
    4. -
    5. Click on the download button or option and choose a location or folder on your device where you want to save the PDF file
    6. -
    7. Wait for the download to complete and check if the PDF file is successfully saved on your device
    8. -
    -
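    If you prefer to script the steps above, here is a minimal sketch that downloads a PDF and confirms it really is 1 MB or less before saving it. The URL is a placeholder for whatever link you found on one of the sites listed earlier.

```python
# Sketch: download a PDF and check that it is no larger than 1 MB.
import requests

PDF_URL = "https://example.com/sample.pdf"  # placeholder link
MAX_BYTES = 1 * 1024 * 1024                 # 1 MB

resp = requests.get(PDF_URL, timeout=30)
resp.raise_for_status()
size_mb = len(resp.content) / (1024 * 1024)
if len(resp.content) > MAX_BYTES:
    print(f"File is {size_mb:.2f} MB - larger than 1 MB, consider compressing it.")
else:
    with open("sample.pdf", "wb") as f:
        f.write(resp.content)
    print(f"Saved sample.pdf ({size_mb:.2f} MB)")
```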

    If you encounter any problems or errors when downloading a 1 MB PDF file, you can try some of these solutions:

    -
      -
    • Refresh the page or reload the PDF file
    • -
    • Check your internet connection and speed
    • -
    • Clear your browser cache and cookies
    • -
    • Disable or adjust your firewall or antivirus settings
    • -
    • Use a different browser or software
    • -
    • Contact the source or website for support or feedback
    • -
    -

    How to Compress a Larger PDF File to 1 MB or Less

    -

    Sometimes, you might not be able to find a 1 MB PDF file that suits your needs. You might have a larger PDF file that you want to download, but it exceeds your bandwidth, storage, or file size limit. In that case, you might want to compress a larger PDF file to a smaller size.

    -

    Compressing a PDF file means reducing its file size by removing or optimizing some of its elements, such as images, fonts, metadata, and more. Compressing a PDF file can help you save time, data, and space when downloading, uploading, sending, or storing it.

    -

    There are many tools or services that can help you compress PDF files online for free. Some of them are:

    -
      -
    • Smallpdf: A free online tool that can compress PDF files up to 80% without losing quality
    • -
    • iLovePDF: A free online tool that can compress PDF files up to 70% without losing quality
    • -
    • PDF Compressor: A free online tool that can compress PDF files up to 90% without losing quality
    • -
    • PDF2Go: A free online tool that can compress PDF files up to 50% without losing quality
    • -
    • Soda PDF: A free online tool that can compress PDF files up to 75% without losing quality
    • -

    To use one of these tools or services to compress your PDF file, you need to follow these steps:

    -
      -
    1. Go to the website or app of the tool or service that you want to use
    2. -
    3. Upload your PDF file from your device or cloud storage
    4. -
    5. Select the compression level or quality that you want for your PDF file
    6. -
    7. Wait for the tool or service to compress your PDF file
    8. -
    9. Download and save the compressed PDF file to your device or cloud storage
    10. -
    -
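    If you would rather compress files offline instead of uploading them to one of the web tools above, Ghostscript can do the same job from a script. This sketch assumes Ghostscript is installed and available on your PATH as "gs" (on Windows the command is typically "gswin64c"); the /ebook preset is a medium-quality setting, and /screen shrinks files even further.

```python
# Sketch: shrink a PDF locally with Ghostscript instead of an online compressor.
import subprocess

subprocess.run([
    "gs",
    "-sDEVICE=pdfwrite",
    "-dCompatibilityLevel=1.4",
    "-dPDFSETTINGS=/ebook",   # medium quality; use /screen for the smallest output
    "-dNOPAUSE", "-dBATCH", "-dQUIET",
    "-sOutputFile=compressed.pdf",
    "input.pdf",
], check=True)
print("Wrote compressed.pdf")
```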

    If you encounter any problems or errors when compressing your PDF file, you can try some of these solutions:

    -
      -
    • Check the file size and format of your PDF file
    • -
    • Check the compression level and quality of your PDF file
    • -
    • Check the compatibility and security of the tool or service that you use
    • -
    • Try a different tool or service
    • -
    • Contact the tool or service provider for support or feedback
    • -
    -

    How to Open and View a 1 MB PDF File on Your Device

    -

    After you download or compress a 1 MB PDF file, you need to open and view it on your device. You can use various software or apps that can help you open and view PDF files. Some of them are:

    -
      -
    • Adobe Acrobat Reader: A free software that can open and view PDF files on Windows, Mac, Android, and iOS devices
    • -
    • Google Chrome: A free web browser that can open and view PDF files on Windows, Mac, Linux, Android, and iOS devices
    • -
    • Microsoft Edge: A free web browser that can open and view PDF files on Windows, Mac, Android, and iOS devices
    • -
    • Foxit Reader: A free software that can open and view PDF files on Windows, Mac, Linux, Android, and iOS devices
    • -
    • PDF Viewer: A free app that can open and view PDF files on Android and iOS devices
    • -
    -

    To open and view a 1 MB PDF file on your device, you need to follow these steps:

    -
      -
    1. Install or update the software or app that you want to use on your device
    2. -
    3. Locate the 1 MB PDF file on your device or cloud storage
    4. -
    5. Open the 1 MB PDF file with the software or app that you use
    6. -
    7. View the 1 MB PDF file on your device screen
    8. -

    To adjust the settings or preferences of your software or app to optimize your viewing experience, you can try some of these options:

    -
      -
    • Zoom in or out to change the size of the PDF file on your screen
    • -
    • Rotate or flip the PDF file to change the orientation of the PDF file on your screen
    • -
    • Search or find a word or phrase in the PDF file
    • -
    • Highlight or annotate a part of the PDF file
    • -
    • Print or share the PDF file with others
    • -
    -

    Conclusion

    -

    Downloading a 1 MB PDF file is not a difficult task if you know how to do it. In this article, we have shown you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device. We hope that this article has helped you learn something new and useful.

    -

    Here are some tips or advice on how to download, compress, and view PDF files efficiently:

    -
      -
    • Choose a reliable and reputable source or website that offers free or low-cost PDF files
    • -
    • Check the file size and format of the PDF file before downloading or compressing it
    • -
    • Use a fast and stable internet connection and a compatible and secure software or app
    • -
    • Compress your PDF file only if necessary and without compromising the quality or content
    • -
    • Open and view your PDF file with the best software or app for your device and preference
    • -
    -

    If you have any questions or comments about downloading, compressing, or viewing PDF files, please feel free to leave them below. We would love to hear from you!

    -

    FAQs

    -

    What is the difference between a PDF file and a Word file?

    -

    A PDF file is a document format that preserves the layout and appearance of your document across different devices and platforms. A Word file is a document format that allows you to edit and format your document with various features and options.

    -

    How can I convert a PDF file to a Word file or vice versa?

    -

    You can use various tools or services that can help you convert PDF files to Word files or vice versa online for free. Some of them are:

    -
      -
    • PDF to Word Converter: A free online tool that can convert PDF files to Word files in seconds
    • -
    • Word to PDF Converter: A free online tool that can convert Word files to PDF files in seconds
    • -
    • PDFelement: A free software that can convert PDF files to Word files and vice versa on Windows, Mac, Android, and iOS devices
    • -
    • WPS Office: A free software that can convert PDF files to Word files and vice versa on Windows, Mac, Linux, Android, and iOS devices
    • -
    • Zamzar: A free online service that can convert PDF files to Word files and vice versa by email
    • -
    -
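    If you prefer converting locally instead of using one of the web services above, the pdf2docx Python library can do a basic PDF-to-Word conversion. The call below follows that library's documented usage as far as we know, but treat it as a sketch and check its documentation for your version.

```python
# Sketch: convert a PDF to a Word document locally with the pdf2docx library.
from pdf2docx import Converter

cv = Converter("input.pdf")   # source PDF
cv.convert("output.docx")     # write the Word document
cv.close()
```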

    How can I edit a PDF file?

    -

    You can use various tools or services that can help you edit PDF files online for free. Some of them are:

    -
      -
    • PDFescape: A free online tool that can help you edit text, images, links, forms, and more in PDF files
    • -
    • PDF Buddy: A free online tool that can help you edit text, images, signatures, annotations, and more in PDF files
    • -
    • PDF-XChange Editor: A free software that can help you edit text, images, comments, stamps, and more in PDF files on Windows devices
    • -
    • PDF Expert: A free software that can help you edit text, images, links, forms, and more in PDF files on Mac devices
    • -
    • Xodo: A free app that can help you edit text, images, annotations, bookmarks, and more in PDF files on Android and iOS devices
    • -
    -

    How can I merge or split a PDF file?

    -

    You can use various tools or services that can help you merge or split PDF files online for free. Some of them are:

    -
      -
    • PDF Merge: A free online tool that can help you merge multiple PDF files into one PDF file
    • -
    • PDF Splitter: A free online tool that can help you split one PDF file into multiple PDF files
    • -
    • PDF SAM: A free online tool that can help you merge or split PDF files with drag and drop
    • -
    • PDFill: A free software that can help you merge or split PDF files with various options on Windows devices
    • -
    • PDFsam Basic: A free software that can help you merge or split PDF files with various options on Windows, Mac, and Linux devices
    • -
    -
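    As a local alternative to the tools above, the pypdf Python library can merge and split PDFs in a few lines. The sketch below assumes a recent pypdf version where PdfWriter supports appending whole files.

```python
# Sketch: merge several PDFs into one, then split a PDF into one file per page.
from pypdf import PdfReader, PdfWriter

# Merge
merged = PdfWriter()
for path in ["part1.pdf", "part2.pdf"]:
    merged.append(path)
with open("merged.pdf", "wb") as f:
    merged.write(f)

# Split
reader = PdfReader("merged.pdf")
for i, page in enumerate(reader.pages):
    out = PdfWriter()
    out.add_page(page)
    with open(f"page_{i + 1}.pdf", "wb") as f:
        out.write(f)
```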

    How can I sign a PDF file?

    -

    You can use various tools or services that can help you sign PDF files online for free. Some of them are:

    -
      -
    • DocuSign: A free online service that can help you sign PDF files with your electronic signature or digital certificate
    • -
    • HelloSign: A free online service that can help you sign PDF files with your electronic signature or digital certificate
    • -
    • Adobe Sign: A free online service that can help you sign PDF files with your electronic signature or digital certificate
    • -
    • SignEasy: A free app that can help you sign PDF files with your electronic signature or digital certificate on Android and iOS devices
    • -
    • SignNow: A free app that can help you sign PDF files with your electronic signature or digital certificate on Android and iOS devices
    • -

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md b/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md deleted file mode 100644 index c87227fbde27ba5a679549c564945d35e268fab4..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md +++ /dev/null @@ -1,128 +0,0 @@ - -

    Genshin Download: How to Play the Free-to-Play RPG on PC, Mobile, and Console

    -

    Genshin Impact is one of the most popular games of 2020 and 2021, attracting millions of players from all over the world. It is a free-to-play open-world action RPG that lets you explore a beautiful fantasy world called Teyvat, where you can meet a diverse cast of characters, fight against powerful enemies, and uncover the mysteries of your lost sibling. Whether you are a fan of anime-style graphics, engaging storylines, or dynamic combat systems, Genshin Impact has something for everyone.

    -

    But how do you download Genshin Impact on your preferred platform? And what are the system requirements and tips and tricks that you need to know before you start your adventure? In this article, we will answer all these questions and more. Read on to find out how to play Genshin Impact on PC, mobile, or console today!

    -




    -

    Genshin Download for PC

    -

    If you want to play Genshin Impact on your PC, you have two options. You can either download it from the official website or from the Epic Games Store. Both methods are free and easy to follow.

    -

    To download Genshin Impact from the official website, you need to visit [Genshin Impact – Step Into a Vast Magical World of Adventure](^1^) and click on the "Windows" button. This will start downloading the launcher file. Once it is downloaded, run it and follow the instructions to install the launcher. Then, open the launcher and log in with your miHoYo account or create one if you don't have one already. After that, click on "Get Game" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing.

    -

    genshin impact download pc
    -genshin impact download size
    -genshin impact download apk
    -genshin impact download android
    -genshin impact download ios
    -genshin impact download mac
    -genshin impact download ps4
    -genshin impact download error
    -genshin impact download slow
    -genshin impact download link
    -genshin impact download windows 10
    -genshin impact download steam
    -genshin impact download update
    -genshin impact download free
    -genshin impact download reddit
    -genshin impact download speed
    -genshin impact download failed
    -genshin impact download for laptop
    -genshin impact download google play
    -genshin impact download requirements
    -genshin impact download time
    -genshin impact download data
    -genshin impact download problem
    -genshin impact download not working
    -genshin impact download stuck
    -genshin impact download zip file
    -genshin impact download emulator
    -genshin impact download obb file
    -genshin impact download latest version
    -genshin impact download official website
    -genshin impact download without launcher
    -genshin impact download qr code
    -genshin impact download verification failed
    -genshin impact download on phone
    -genshin impact download on switch
    -genshin impact download on xbox one
    -genshin impact download on chromebook
    -genshin impact download on linux
    -genshin impact download on bluestacks
    -genshin impact download on macbook air
    -genshin impact download on macbook pro
    -genshin impact download on ipad pro
    -genshin impact download on iphone 6s
    -genshin impact download on iphone 7 plus
    -genshin impact download on iphone 8 plus
    -genshin impact download on iphone x

    -

    To download Genshin Impact from the Epic Games Store, you need to visit [Genshin Impact | Download and Play for Free - Epic Games Store](^3^) and click on "Get". This will prompt you to log in with your Epic Games account or create one if you don't have one already. Then, click on "Place Order" to confirm your purchase (don't worry, it's still free). After that, you will be redirected to the Epic Games Launcher. If you don't have it installed on your PC, you can download it from [Epic Games Launcher]. Once you have the launcher, install it and open it. Then, go to the "Library" tab and find Genshin Impact. Click on "Install" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing.

    -

    PC System Requirements

    -

    Before you download Genshin Impact on your PC, you should check if your PC meets the minimum or recommended system requirements for the game. Here are the system requirements for PC according to the official website:

| Minimum System Requirements | Recommended System Requirements |
| --- | --- |
| OS: Windows 7 SP1 64-bit, Windows 8.1 64-bit, or Windows 10 64-bit | OS: Windows 10 64-bit |
| Processor: Intel Core i5 or equivalent | Processor: Intel Core i7 or equivalent |
| Memory: 8 GB RAM | Memory: 16 GB RAM |
| Graphics: NVIDIA GeForce GT 1030 or higher | Graphics: NVIDIA GeForce GTX 1060 6 GB or higher |
| DirectX: Version 11 | DirectX: Version 11 |
| Storage: 30 GB available space | Storage: 30 GB available space |

    If your PC does not meet the minimum system requirements, you may experience low frame rates, crashes, or other issues while playing the game. If your PC meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics settings and smoother performance.
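    Since storage is the requirement people most often overlook, here is a small sketch that checks whether the drive you plan to install on has the 30 GB of free space listed above. The drive letter is an assumption; point it at whichever drive you will actually use.

```python
# Sketch: check free disk space against the 30 GB storage requirement.
import shutil

REQUIRED_GB = 30
DRIVE = "C:\\"  # assumed install drive; change to the drive you plan to use

total, used, free = shutil.disk_usage(DRIVE)
free_gb = free / (1024 ** 3)
print(f"Free space on {DRIVE}: {free_gb:.1f} GB")
if free_gb < REQUIRED_GB:
    print("Not enough space for Genshin Impact - free up some storage first.")
```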

    -

    PC Tips and Tricks

    -

    Here are some tips and tricks that can help you optimize your PC performance and gameplay experience while playing Genshin Impact:

    -
      -
    • Update your graphics drivers and DirectX to the latest version. This can improve your graphics quality and stability.
    • -
    • Adjust your graphics settings in the game options. You can lower some settings such as anti-aliasing, shadows, or render resolution to increase your frame rate and reduce lag. You can also enable or disable some features such as V-sync, FPS limit, or window mode to suit your preference.
    • -
    • Close other programs or background processes that may consume your CPU, memory, or bandwidth. This can free up some resources for the game and prevent potential conflicts or errors.
    • -
    • Use a wired connection instead of a wireless one if possible. This can reduce latency and packet loss and improve your online experience.
    • -
    • Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.
    • -
| Minimum System Requirements | Recommended System Requirements |
| --- | --- |
| … of storage space | 8 GB of storage space or more |
| iOS 9.0 or higher | iOS 10.0 or higher |
| iPhone 8 Plus, iPad Air 3, or higher | iPhone XR, iPad Pro, or higher |

    If your device does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your device meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance.

    -

    Mobile Tips and Tricks

    -

    Here are some tips and tricks that can help you optimize your mobile performance and gameplay experience while playing Genshin Impact:

    -
      -
    • Update your device software and app to the latest version. This can fix some bugs and improve your compatibility and stability.
    • -
    • Adjust your graphics settings in the game options. You can lower some settings such as render resolution, shadow quality, or FPS to save battery life and reduce overheating. You can also enable or disable some features such as auto-adjust graphics, custom controls, or HD assets to suit your preference.
    • -
    • Use a Wi-Fi connection instead of a mobile data connection if possible. This can reduce data usage and improve your online experience.
    • -
    • Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.
    • -
    • Use headphones or earphones to enjoy the game's immersive sound effects and music.
    • -
    -

    Genshin Download for Console

    -

    If you want to play Genshin Impact on your console, you can download it from PlayStation Store if you have a PlayStation 4 or PlayStation 5. The method is free and easy to follow.

    -

    To download Genshin Impact from PlayStation Store, you need to visit [Genshin Impact on PS4 | Official PlayStation™Store US] or [Genshin Impact on PS5 | Official PlayStation™Store US] depending on your console. Then, click on the "Add to Library" button. This will add the game to your library. Then, go to your library and find Genshin Impact. Click on the "Download" button to start downloading the game files. The download size is about 12 GB for PS4 and 14 GB for PS5, so it may take some time depending on your internet speed and console storage. When the download is complete, click on the game icon to start playing.

    -

    Console System Requirements

    -

    Before you download Genshin Impact on your console, you should check if your console meets the minimum system requirements for the game. Here are the system requirements for consoles according to the official website:

    | Minimum System Requirements | Recommended System Requirements |
    | --- | --- |
    | PS4 with 30 GB of storage space | PS4 Pro with 30 GB of storage space |
    | PS5 with 50 GB of storage space | PS5 with 50 GB of storage space |

    If your console does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your console meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance.

    -

    Console Tips and Tricks

    -

    Here are some tips and tricks that can help you optimize your console performance and gameplay experience while playing Genshin Impact:

    -
      -
    • Update your console software and app to the latest version. This can fix some bugs and improve your compatibility and stability.
    • -
    • Adjust your graphics settings in the game options. You can choose between "Favor Resolution" or "Favor Performance" modes to balance between graphics quality and frame rate. You can also enable or disable some features such as motion blur, anti-aliasing, or HDR to suit your preference.
    • -
    • Link your miHoYo account to your PlayStation Network account. This can allow you to access some online features such as cross-play, cross-save, mail system, events, and more.
    • -
    • Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.
    • -
    • Use a controller that suits your play style and comfort. You can customize your controller layout in the game options.
    • -
    -

    Genshin Impact Game Features

    -

    Now that you know how to download Genshin Impact on your preferred platform, you may be wondering what you can expect from the game in terms of gameplay, story, characters, combat, exploration, and more. In this section, we will give you a brief overview of some of the main features of the game that make it so fun and addictive.

    -

    Gameplay

    -

    Genshin Impact is an open-world action RPG that combines exploration, combat, and gacha elements. You can explore the vast world of Teyvat at your own pace, discovering new locations, secrets, and treasures along the way. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players.

    -

    The game also features a gacha system that allows you to obtain new characters, weapons, and items by spending a currency called Primogems. You can earn Primogems by playing the game or by purchasing them with real money. The gacha system is based on a random chance, so you may not always get what you want. However, the game is generous enough to give you some free pulls and rewards as you progress.
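    To make the "random chance" concrete, the toy calculation below shows the probability of getting at least one 5-star item across a number of pulls for an assumed per-pull rate. The 0.6% rate is a placeholder for illustration, and the real banners also include pity mechanics that this simple model ignores.

```python
# Toy model: chance of at least one 5-star in n independent pulls.
# ASSUMED_RATE is an illustrative placeholder, not an official drop rate,
# and pity mechanics are deliberately ignored here.
ASSUMED_RATE = 0.006

def chance_of_at_least_one(rate: float, pulls: int) -> float:
    return 1.0 - (1.0 - rate) ** pulls

for n in (10, 50, 90):
    print(f"{n:>2} pulls -> {chance_of_at_least_one(ASSUMED_RATE, n):.1%}")
```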

    -

    Story

    -

    Genshin Impact has a rich and immersive story that unfolds as you play the game. The main story revolves around your quest to find your lost sibling, who was separated from you by a mysterious god. Along the way, you will encounter different factions, cultures, and conflicts that shape the world of Teyvat. You will also meet various characters who will join your party and help you in your journey.

    -

    The game has seven major regions, each based on a different element and inspired by a real-world culture. So far, only two regions are available: Mondstadt (Anemo/Wind) and Liyue (Geo/Earth). The other five regions are Inazuma (Electro/Lightning), Sumeru (Dendro/Nature), Fontaine (Hydro/Water), Natlan (Pyro/Fire), and Snezhnaya (Cryo/Ice). The game developers plan to release more regions and content in the future through updates and patches.

    -

    Characters

    -

    Genshin Impact has a diverse and colorful cast of characters that you can play as or interact with. There are currently 37 playable characters in the game, each with their own personality, backstory, element, weapon, and abilities. You can switch between four characters in your party at any time, depending on the situation and your preference.

    -

    Playable characters come in two rarity tiers, 4-star and 5-star, while weapons and artifacts range from 1-star to 5-star. The higher the rarity, the more powerful and rare the character is. You can obtain new characters by using the gacha system or by completing certain quests or events. You can also upgrade your characters by leveling them up, ascending them, enhancing their weapons and artifacts, and unlocking their constellations.

    -

    Combat

    -

    Genshin Impact has a dynamic and fluid combat system that relies on elemental interactions and strategy. You can use your character's basic attacks, elemental skills, and elemental bursts to deal damage to your enemies. You can also switch between different characters to create elemental reactions that can amplify or modify your damage output.

    -

    The game has seven elements: Anemo (Wind), Geo (Earth), Electro (Lightning), Dendro (Nature), Hydro (Water), Pyro (Fire), and Cryo (Ice). Each element has its own strengths and weaknesses against other elements. For example, Pyro can melt Cryo, but is weak against Hydro. You can use this knowledge to your advantage and create powerful combos that can wipe out your foes.
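    One way to picture this system is as a lookup table keyed by pairs of elements. The sketch below encodes only the handful of reactions mentioned here as an illustration; it is not a complete or authoritative reaction chart.

```python
# Illustrative lookup of elemental reactions by unordered element pair.
# Only a few example pairs are listed; this is not the full in-game table.
REACTIONS = {
    frozenset({"Pyro", "Cryo"}): "Melt",
    frozenset({"Pyro", "Hydro"}): "Vaporize",
    frozenset({"Electro", "Hydro"}): "Electro-Charged",
    frozenset({"Anemo", "Pyro"}): "Swirl",
}

def reaction(first: str, second: str) -> str:
    return REACTIONS.get(frozenset({first, second}), "no reaction")

print(reaction("Cryo", "Pyro"))   # Melt
print(reaction("Geo", "Dendro"))  # no reaction
```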

    -

    Exploration

    -

    Genshin Impact has a vast and beautiful world that you can explore at your own pace. You can travel across different terrains, climates, and biomes using various methods such as walking, running, climbing, gliding, swimming, or riding. You can also use fast travel points to teleport to locations that you have already visited.

    -

    The world of Teyvat is full of secrets, quests, events, and activities that you can discover and enjoy. You can find chests, resources, puzzles, enemies, and more that can reward you with items, experience, or currency. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players.

    -

    The game also has a feature called the Serenitea Pot, which allows you to create your own personal realm and customize it with furniture, decorations, and buildings. You can invite your characters and friends to your realm and enjoy some relaxing time.

    -

    Conclusion

    -

    Genshin Impact is a free-to-play open-world action RPG that offers a lot of fun and excitement for players of all ages and preferences. You can download the game on PC, mobile, or console and enjoy the stunning graphics, captivating story, diverse characters, dynamic combat, and endless exploration. Whether you want to play solo or with friends, Genshin Impact has something for everyone.

    -

    So what are you waiting for? Download Genshin Impact today and start your adventure in the magical world of Teyvat!

    -

    FAQs

    -

    Here are some frequently asked questions about Genshin Impact and how to download it:

    -
      -
    • Q: Is Genshin Impact free?
    • -
    • A: Yes, Genshin Impact is free to download and play. However, it does have some optional in-game purchases that can enhance your gameplay experience.
    • -
    • Q: Is Genshin Impact cross-platform?
    • -
    • A: Yes, Genshin Impact supports cross-platform play between PC, mobile, and console. You can play with your friends on different devices as long as you are on the same server and have the same game version.
    • -
    • Q: How do I update Genshin Impact?
    • -
    • A: Genshin Impact updates automatically when you launch the game on your platform. You can also check for updates manually in the game options or on the official website.
    • -
    • Q: How do I contact Genshin Impact customer service?
    • -
    • A: You can contact Genshin Impact customer service by using the in-game feedback system or by visiting [Genshin Impact – Step Into a Vast Magical World of Adventure] and clicking on "Support".
    • -
    • Q: How do I get more Primogems?
    • -
    • A: You can get more Primogems by playing the game and completing various tasks such as quests, events, achievements, daily commissions, spiral abyss, etc. You can also buy Primogems with real money by using the in-game shop or by redeeming gift codes.
    • -

    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md b/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md deleted file mode 100644 index d2bf447b055b09aaf587fa2e8a12fe521d8bfaeb..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md +++ /dev/null @@ -1,79 +0,0 @@ - -

    Granny Chapter 2 Mod Menu Outwitt Mod Free Download Link APK Madin

    -

    If you are a fan of horror games, you might have heard of Granny Chapter 2, a popular game that challenges you to escape from a creepy house with two evil characters: Granny and Grandpa. But what if you want to make the game more fun and exciting? Well, you can try Outwitt Mod, a mod menu that gives you access to various cheats and hacks for Granny Chapter 2. In this article, we will tell you everything you need to know about Outwitt Mod, including how to download and install it for free on your Android device.

    -

    What is Granny Chapter 2?

    -

    Granny Chapter 2 is a horror game developed by DVloper, the same creator of the original Granny game. It was released in September 2019 and has since gained millions of downloads and positive reviews from players around the world. The game is available for Android, iOS, and Windows devices.

    -


    The gameplay of Granny Chapter 2

    -

    The gameplay of Granny Chapter 2 is similar to the first game, but with some new twists and features. You are trapped in a dark and spooky house with two enemies: Granny and Grandpa. Granny can hear everything and will chase you if she hears any noise. Grandpa is hard of hearing but he will attack you if he sees you. You have to find a way to escape from the house within five days, or else you will face a horrible fate. You can explore different rooms, find items, solve puzzles, and hide from the enemies. But be careful, because they are always on the lookout for you.

    -

    The features of Granny Chapter 2

    -

    Granny Chapter 2 has many features that make it an enjoyable and thrilling game. Some of them are:

    -
      -
    • You can choose between four difficulty levels: Easy, Normal, Hard, and Extreme.
    • -
    • You can play in two modes: Practice or Normal. In Practice mode, you can explore the house without any enemies. In Normal mode, you have to face both Granny and Grandpa.
    • -
    • You can customize your character's appearance, such as hair color, skin color, and clothes.
    • -
    • You can use different weapons, such as a shotgun, a crossbow, a stun gun, or a crowbar, to fight back against the enemies.
    • -
    • You can interact with various objects, such as doors, windows, cabinets, drawers, traps, cameras, etc.
    • -
    • You can enjoy realistic graphics, sound effects, and music that create a scary atmosphere.
    • -
    -

    What is Outwitt Mod?

    -

    Outwitt Mod is a mod menu that allows you to modify the game settings and enable various cheats and hacks for Granny Chapter 2. It was created by Outwitt, a YouTube channel that uploads videos about Granny games and mods. Outwitt Mod is one of the most popular mods for Granny Chapter 2 and has been downloaded by thousands of players.

    -

    The benefits of Outwitt Mod

    -

    Outwitt Mod gives you many benefits that make the game more fun and easy. Some of them are:

    -
      -
    • You can access a mod menu that lets you change the game settings, such as the difficulty level, the enemy speed, the enemy damage, etc.
    • -
    • You can enable cheats that give you unlimited health, unlimited ammo, invisibility, teleportation, etc.
    • -
    • You can unlock all items in the game, such as weapons, keys, tools, etc.


      -

      Summary of the main points

      -

      Here are the main points of this article:

      -
        -
      • Granny Chapter 2 is a horror game that challenges you to escape from a house with two enemies: Granny and Grandpa.
      • -
      • Outwitt Mod is a mod menu that gives you access to various cheats and hacks for Granny Chapter 2.
      • -
      • Outwitt Mod has many benefits, such as unlimited health, unlimited ammo, invisibility, teleportation, etc.
      • -
      • Outwitt Mod also has some drawbacks, such as security risks, game bans, loss of originality, and compatibility issues.
      • -
      • To download and install Outwitt Mod for Granny Chapter 2, you need to uninstall the original game, enable unknown sources, download the APK file, install it, and launch the game.
      • -
      • Before downloading and installing Outwitt Mod for Granny Chapter 2, you should backup your data, scan the APK file, use a VPN or a proxy server, and not use it for illegal or unethical purposes.
      • -
      -

      Call to action for the readers

      -

      If you liked this article, please share it with your friends and family who are interested in Granny Chapter 2 and Outwitt Mod. Also, don't forget to subscribe to our website for more articles like this one. And if you have any questions or feedback about this article, please leave a comment below. We would love to hear from you.

      -

      FAQs

      -

      Here are some frequently asked questions about Granny Chapter 2 and Outwitt Mod:

      -
        -
      1. Q: Is Outwitt Mod safe to use?
        A: Outwitt Mod is not an official app from the game developer, so it may contain some viruses, malware, or spyware that can harm your device or data. Therefore, you should always scan the APK file with an antivirus or anti-malware software before installing it. You should also use a VPN or a proxy server to hide your IP address and location when playing the game online.
      2. -
      3. Q: Is Outwitt Mod legal to use?
        A: Outwitt Mod is not legal to use because it violates the terms and conditions of the game developer. By using Outwitt Mod, you are cheating and hacking the game, which is unfair to other players and disrespectful to the game creator. You may also get banned from playing the game online if the game developer detects your mod usage.
      4. -
      5. Q: Does Outwitt Mod work on iOS devices?
        A: No, Outwitt Mod only works on Android devices. It is not compatible with iOS devices because it is an APK file that can only be installed on Android devices. If you want to use Outwitt Mod on your iOS device, you will need to jailbreak your device first, which is not recommended because it can damage your device or void your warranty.
      6. -
      7. Q: Does Outwitt Mod work on Windows devices?
        A: Yes, Outwitt Mod can work on Windows devices if you use an Android emulator. An Android emulator is a software that allows you to run Android apps on your Windows device. You can download an Android emulator such as BlueStacks or NoxPlayer on your Windows device and then install Outwitt Mod on it. However, this may affect the performance of your device or the game.
      8. -
      9. Q: Where can I find more mods for Granny Chapter 2?
        A: You can find more mods for Granny Chapter 2 on various websites or YouTube channels that offer mod downloads or tutorials. Some of them are NullZerep Mods, GodisAGamer Mods, Platinmods Mods, etc. But be careful when downloading mods from unknown sources because they may contain viruses or malware that can harm your device or data.
      10. -

      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Logo Quiz MOD APK and Enjoy the Fun of Recognizing Logos.md b/spaces/1phancelerku/anime-remove-background/Download Logo Quiz MOD APK and Enjoy the Fun of Recognizing Logos.md deleted file mode 100644 index bf0da9f7b232565ef442062ced8ab4cf133ff41d..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Logo Quiz MOD APK and Enjoy the Fun of Recognizing Logos.md +++ /dev/null @@ -1,118 +0,0 @@ -
      -

      Logo Quiz Mod APK: A Fun and Challenging Game for Logo Lovers

      -

      Do you think you can recognize hundreds of logos from different brands and companies? Do you want to test your logo knowledge and have fun at the same time? If you answered yes, then you should try Logo Quiz Mod APK, a free trivia game that will keep you entertained for hours.

      -


      What is Logo Quiz Mod APK?

      -

      Logo Quiz Mod APK is a modified version of the original Logo Quiz game by Bubble Quiz Games. In this game, you have to guess the names of thousands of logos from popular companies all over the world. You will see various logos on the screen, and you have to type in the correct answer using the available letters. You can also use hints to help you if you get stuck.

      -

      Logo Quiz Mod APK is different from the original game in that it gives you unlimited hints, extra levels, offline mode, and daily challenges. These features make the game more enjoyable and less frustrating. You can also play the game without an internet connection, which is great if you want to kill some time when you are offline.

      -

      Features of Logo Quiz Mod APK

      -

      Unlimited hints

      -

      One of the best features of Logo Quiz Mod APK is that it gives you unlimited hints. Each logo has 5 hints that you can use to get a clue about the answer. You can also get new hints by answering correctly or watching ads. However, with Logo Quiz Mod APK, you don't have to worry about running out of hints or watching ads. You can use as many hints as you want without any limitations.

      -

      Extra levels

      -

      Another feature of Logo Quiz Mod APK is that it gives you access to extra levels that are not available in the original game. These levels include:

      -
        -
      • Slogans: Guess the slogans of 200 famous brands.
      • -
      • Minimalist: Guess the logos that are simplified to their basic shapes and colors.
      • -
      • Retro: Guess the logos that are based on old versions or designs.
      • -
      • Expert: Guess the logos that are very hard or obscure.
      • -
      -

      These extra levels add more variety and challenge to the game, making it more interesting and fun.

      -

      Offline mode

      -

      Logo Quiz Mod APK also allows you to play the game offline, which means you don't need an internet connection to enjoy it. This is great if you want to play the game when you are traveling, waiting, or bored. You can also save your data and battery by playing offline.

      -


      -

      Daily challenges

      -

      Logo Quiz Mod APK also offers daily challenges that give you a new puzzle every day. These puzzles are different from the regular levels and require more skill and speed. You have to guess as many logos as possible in a limited time and earn points and extra hints. You can also compare your scores with other players and see how well you rank.

      -

      How to download and install Logo Quiz Mod APK

      -

      If you want to try Logo Quiz Mod APK, you have to download and install it on your Android device. Here are the steps to follow:

      Step 1: Enable unknown sources

      -

      Before you can install Logo Quiz Mod APK, you need to allow your device to install apps from unknown sources. This means that you can install apps that are not from the Google Play Store. To do this, go to your device settings and tap on Security. Then, look for the option that says Install unknown apps or Unknown sources and enable it. You may see a warning message that tells you about the risks of installing unknown apps, but you can ignore it if you trust the source of the APK file.

      -

      Step 2: Download the APK file

      -

      Next, you need to download the APK file of Logo Quiz Mod APK from a reliable website. You can use your browser to find and download the file, or you can use a computer and transfer the file to your device via USB. Make sure you download the latest version of the APK file and that it is compatible with your device. You can check the file size and version number before downloading it.
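      If you download the file on a computer first, a small script like the one below can fetch it and print its size and SHA-256 hash so you can compare the hash against one published by a site you trust. The URL and filename are placeholders, not real download links.

```python
# Sketch: download an APK and print its SHA-256 so it can be checked against a
# published checksum. The URL below is a hypothetical placeholder.
import hashlib
import os
import urllib.request

APK_URL = "https://example.com/logo-quiz-mod.apk"  # placeholder URL
OUT_FILE = "logo-quiz-mod.apk"

urllib.request.urlretrieve(APK_URL, OUT_FILE)

digest = hashlib.sha256()
with open(OUT_FILE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print(f"Saved {OUT_FILE} ({os.path.getsize(OUT_FILE)} bytes)")
print(f"SHA-256: {digest.hexdigest()}")
```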

      -

      Step 3: Install the APK file

      -

      Once you have downloaded the APK file, you need to locate it on your device and tap on it to start the installation process. You can use a file manager app to find the file in your Downloads folder or wherever you saved it. You may see a pop-up window that asks you to confirm the installation and grant some permissions to the app. Tap on Install and wait for the installation to finish.

      -

      Step 4: Launch the game and enjoy

      -

      After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a welcome screen that introduces you to the game and its features. You can also access the settings menu to adjust some options such as sound, language, and notifications. Now you are ready to play Logo Quiz Mod APK and have fun guessing logos from different brands.

      Tips and tricks for playing Logo Quiz Mod APK

      -

      Logo Quiz Mod APK is a fun and challenging game, but it can also be frustrating if you don't know the answers. Here are some tips and tricks that can help you play the game better and enjoy it more:

      -

      Use hints wisely

      -

      As mentioned earlier, Logo Quiz Mod APK gives you unlimited hints, which can be very helpful if you are stuck. However, you should not rely on hints too much, as they can make the game too easy and boring. You should try to guess the logo first before using a hint, and only use one hint at a time. You can also save your hints for later levels, where the logos are harder and more obscure.

      -

      Learn from your mistakes

      -

      Another tip for playing Logo Quiz Mod APK is to learn from your mistakes. If you guess a logo wrong, don't just skip it and move on. Instead, try to remember the correct answer and the logo design. This way, you can improve your memory and recognition skills, and avoid making the same mistake again. You can also review your answers at the end of each level and see where you went wrong.

      -

      Use online resources

      -

      If you are really stuck and can't figure out a logo, you can always use online resources to help you. There are many websites and apps that offer logo quizzes and answers, such as Logo Quiz Answers or Logo Quiz Cheats. You can also use search engines or image recognition tools to find the logo you are looking for. However, you should use these resources sparingly, as they can spoil the fun and challenge of the game.

      -

      Challenge yourself and your friends

      -

      One of the best ways to enjoy Logo Quiz Mod APK is to challenge yourself and your friends. You can set a goal for yourself, such as completing a level without using any hints or within a certain time limit. You can also compete with your friends and see who can guess more logos or score higher. You can share your results on social media or via messaging apps, and invite your friends to join the game.

      -

      Benefits of playing Logo Quiz Mod APK

      -

      Logo Quiz Mod APK is not only a fun and entertaining game, but also a beneficial one. Here are some of the benefits of playing Logo Quiz Mod APK:

      -

      Enhance your memory and recognition skills

      -

      Playing Logo Quiz Mod APK can help you enhance your memory and recognition skills, as you have to remember and identify thousands of logos from different brands and companies. This can improve your brain function and cognitive abilities, as well as your attention span and concentration. You can also learn new things and facts about logos and brands that you may not have known before.

      -

      Expand your knowledge of brands and logos

      -

      Playing Logo Quiz Mod APK can also help you expand your knowledge of brands and logos, as you have to guess logos from various categories, such as food, fashion, sports, technology, etc. You can discover new brands and logos that you may not have heard of before, or learn more about the ones that you already know. You can also appreciate the creativity and design of logos, and how they convey the identity and message of a brand.

      -

      Have fun and relax

      -

      Finally, playing Logo Quiz Mod APK can help you have fun and relax, as it is a simple and enjoyable game that anyone can play. You can play it anytime and anywhere, whether you are online or offline. You can also play it at your own pace, without any pressure or stress. You can also have fun with your friends and family, by playing together or competing with each other.

      -

      Conclusion

      -

      Logo Quiz Mod APK is a fun and challenging game for logo lovers who want to test their logo knowledge and have fun at the same time. It offers unlimited hints, extra levels, offline mode, and daily challenges that make the game more enjoyable and less frustrating. It also helps enhance memory and recognition skills, expand knowledge of brands and logos, and have fun and relax. If you want to try Logo Quiz Mod APK, you can download it from a reliable website and install it on your Android device by following the steps above.

      -

      Frequently Asked Questions

      -
        -
      1. What is the difference between Logo Quiz Mod APK and Logo Quiz?
      2. -

        Logo Quiz Mod APK is a modified version of Logo Quiz that gives you unlimited hints, extra levels, offline mode, and daily challenges. Logo Quiz is the original game that has limited hints, fewer levels, online mode only, and no daily challenges.

        -
      3. How many logos are there in Logo Quiz Mod APK?
      4. -

        Logo Quiz Mod APK has over 4000 logos from different categories and levels. You can also play the extra levels that have 200 logos each.

        -
      5. How can I get more hints in Logo Quiz Mod APK?
      6. -

        You don't need to get more hints in Logo Quiz Mod APK, as it gives you unlimited hints. You can use as many hints as you want without any restrictions. However, if you want to challenge yourself, you can try to guess the logos without using hints or use them sparingly.

        -
      7. Is Logo Quiz Mod APK safe to download and install?
      8. -

        Logo Quiz Mod APK is safe to download and install, as long as you get it from a reliable website that does not contain any viruses or malware. You should also scan the APK file before installing it on your device, and make sure you have enabled unknown sources in your device settings.

        -
      9. Can I play Logo Quiz Mod APK on my PC or iOS device?
      10. -

        Logo Quiz Mod APK is designed for Android devices only, so you cannot play it on your PC or iOS device. However, you can use an Android emulator on your PC to run the game, such as BlueStacks or Nox Player. You can also play the original Logo Quiz game on your PC or iOS device by downloading it from the Google Play Store or the App Store.

        -
      11. What are some of the best logo quiz games for Android?
      12. -

        Some of the best logo quiz games for Android are:

        -
          -
        • Logo Game: Guess Brand Quiz by Logos Box
        • -
        • Guess The Brand - Logo Mania by IcoMania - Logo Quiz - Logos Quiz
        • -
        • Logo Quiz World by MSI Apps
        • -
        • Ultimate Logo Quiz by Bubble Quiz Games
        • -
        • Logo Trivial Quiz by Carlos Alcarria
        • -

        -
        -
        \ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/src/lib/hooks/chat-history.ts b/spaces/2023Liu2023/bingo/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, - })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/839871171w/newbingAI/Dockerfile b/spaces/839871171w/newbingAI/Dockerfile deleted file mode 100644 index 52372e308ff493c9bbb0392492ed3f081c8e8770..0000000000000000000000000000000000000000 --- a/spaces/839871171w/newbingAI/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder 
/workspace/app/go-proxy-bingai . - -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="1B4yYaAOHZW5A5NEj-BnRHc80SQbg-Mlu83S-t36PdJ8LU_pqBAxHhhNqgpGSWGfRlGepqeiaYk2sKyR8a8w8ehi6V5SenYspGG0DC0n0iHuML-VoNMsbH64tPWJNPwzpBlse3566VRGzaOafGtk8gk1SX1dYvkFvzlK1hucI40aMUKOO2sjmiMFU1lgEgWu2ZPMYIoIZ_pnw32mlIgRdn1XA6Kml6GFf_3_2oYt6Fw4" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/A666sxr/Genshin_TTS/utils.py b/spaces/A666sxr/Genshin_TTS/utils.py deleted file mode 100644 index 92e696511242a28a5a929b286f143c1b4d235009..0000000000000000000000000000000000000000 --- a/spaces/A666sxr/Genshin_TTS/utils.py +++ /dev/null @@ -1,263 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.WARNING) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - ckptname = checkpoint_path.split("/")[-1] - newest_step = int(ckptname.split(".")[0].split("_")[1]) - last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step-3000)) - if newest_step >= 3000: - os.system(f"rm {last_ckptname}") - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - 
mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/AB-TW/team-ai/agents/code_generate_agent.py b/spaces/AB-TW/team-ai/agents/code_generate_agent.py deleted file mode 100644 index 9db0d11b2a25fcf187654e1f9977d914590d3b2b..0000000000000000000000000000000000000000 --- a/spaces/AB-TW/team-ai/agents/code_generate_agent.py +++ /dev/null @@ -1,229 +0,0 @@ -import re -from typing import List, Union -from langchain.chains import LLMChain -from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor, AgentOutputParser -from langchain.schema import AgentAction, AgentFinish -from langchain.agents import initialize_agent -from langchain.prompts import StringPromptTemplate -from agents.promopts import code_generate_agent_template -from agents.tools.smart_domain.api_layer_code_tool import apiLayerCodeGenerator -from agents.tools.smart_domain.domain_layer_code_tool import domainLayerCodeGenerator -from agents.tools.smart_domain.entity import entityCodeGenerator -from agents.tools.smart_domain.association import associationCodeGenerator -from agents.tools.smart_domain.db_entity_repository import dbEntityRepositoryCodeGenerator -from agents.tools.smart_domain.association_impl import asociationImplCodeGenerator -from agents.tools.smart_domain.persistent_layer_code_tool import persistentLayerCodeGenerator -from models import llm - - -class CustomPromptTemplate(StringPromptTemplate): - # The template to use - template: str - # The list of tools available - tools: List[Tool] - - def format(self, **kwargs) -> str: - # Get the intermediate steps (AgentAction, Observation tuples) - # Format them in a particular way - intermediate_steps = kwargs.pop("intermediate_steps") - thoughts = "" - for action, observation in intermediate_steps: - thoughts += action.log - thoughts += f"\nObservation: {observation}\nThought: " - # Set the agent_scratchpad variable to that value - kwargs["agent_scratchpad"] = thoughts - # Create a tools variable from the list of tools provided - kwargs["tools"] = "\n".join( - [f"{tool.name}: {tool.description}" for tool in self.tools]) - # Create a list of tool names for the tools provided - kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools]) - return self.template.format(**kwargs) - - -class CustomOutputParser(AgentOutputParser): - - def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: - # Check if agent should finish - if "Final 
Answer:" in llm_output: - return AgentFinish( - # Return values is generally always a dictionary with a single `output` key - # It is not recommended to try anything else at the moment :) - return_values={"output": llm_output.split( - "Final Answer:")[-1].strip()}, - log=llm_output, - ) - # Parse out the action and action input - regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" - match = re.search(regex, llm_output, re.DOTALL) - if not match: - raise ValueError(f"Could not parse LLM output: `{llm_output}`") - action = match.group(1).strip() - action_input = match.group(2) - # Return the action and action input - return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) - -# chatllm=ChatOpenAI(temperature=0) -# code_genenrate_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) -# code_generate_agent = initialize_agent(tools, chatllm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, memory=memory, verbose=True) - - - -# agent = initialize_agent( -# tools=tools, llm=llm_chain, template=AGENT_PROMPT, stop=["\nObservation:"], agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) -code_agent_tools = [domainLayerCodeGenerator, entityCodeGenerator, associationCodeGenerator, persistentLayerCodeGenerator, dbEntityRepositoryCodeGenerator, asociationImplCodeGenerator, apiLayerCodeGenerator] - -def code_agent_executor() -> AgentExecutor: - output_parser = CustomOutputParser() - AGENT_PROMPT = CustomPromptTemplate( - template=code_generate_agent_template, - tools=code_agent_tools, - # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically - # This includes the `intermediate_steps` variable because that is needed - input_variables=["input", "intermediate_steps"] - ) - - code_llm_chain = LLMChain(llm=llm(temperature=0.7), prompt=AGENT_PROMPT) - - tool_names = [tool.name for tool in code_agent_tools] - code_agent = LLMSingleActionAgent( - llm_chain=code_llm_chain, - output_parser=output_parser, - stop=["\nObservation:"], - allowed_tools=tool_names, - ) - - code_agent_executor = AgentExecutor.from_agent_and_tools( - agent=code_agent, tools=code_agent_tools, verbose=True) - return code_agent_executor - -# if __name__ == "__main__": -# response = domainLayerChain.run("""FeatureConfig用于配置某个Feature中控制前端展示效果的配置项 -# FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间 -# FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED) -# FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED -# 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作 -# 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作 -# 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作 -# """) - -# print(response) - - -# response = persistentChain.run(""" -# Entity: -# ``` -# public class FeatureConfig { -# private FeatureConfigId id; -# private FeatureConfigDescription description; - -# public enum FeatureConfigStatus { -# DRAFT, PUBLISHED, DISABLED; -# } - -# public record FeatureConfigId(String id) {} -# public record FeatureKey(String key) {} -# public record FeatureConfigData(String data) {} -# public record FeatureConfigSaData(String saData) {} - -# @Builder -# public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description, -# FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {} - -# public void update(FeatureConfigDescription description) { -# this.title = 
description.title(); -# this.description = description.description(); -# this.updateTime = LocalDateTime.now(); -# } - -# public void publish() { -# this.status = FeatureConfigStatus.PUBLISHED; -# this.updateTime = LocalDateTime.now(); -# } - -# public void disable() { -# this.status = FeatureConfigStatus.DISABLED; -# this.updateTime = LocalDateTime.now(); -# } -# } -# ``` - -# Association: -# ``` -# public interface FeatureConfigs { -# Flux findAllByFeatureKey(String featureKey); -# Mono findById(FeatureConfigId id); -# Mono save(FeatureConfig featureConfig); -# } -# ``` -# """) - -# print(response) - - -# response = apiChain.run(""" -# Entity: -# ``` -# public class FeatureConfig { -# private FeatureConfigId id; -# private FeatureConfigDescription description; - -# public enum FeatureConfigStatus { -# DRAFT, PUBLISHED, DISABLED; -# } - -# public record FeatureConfigId(String id) {} -# public record FeatureKey(String key) {} -# public record FeatureConfigData(String data) {} -# public record FeatureConfigSaData(String saData) {} - -# @Builder -# public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description, -# FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {} - -# public void update(FeatureConfigDescription description) { -# this.title = description.title(); -# this.description = description.description(); -# this.updateTime = LocalDateTime.now(); -# } - -# public void publish() { -# this.status = FeatureConfigStatus.PUBLISHED; -# this.updateTime = LocalDateTime.now(); -# } - -# public void disable() { -# this.status = FeatureConfigStatus.DISABLED; -# this.updateTime = LocalDateTime.now(); -# } -# } -# ``` - -# Association: -# ``` -# public interface FeatureConfigs { -# Flux findAllByFeatureKey(String featureKey); -# Mono findById(FeatureConfigId id); -# Mono save(FeatureConfig featureConfig); -# Mono update(FeatureConfigId id, FeatureConfigDescription description); -# Mono publish(FeatureConfigId id); -# Mono disable(FeatureConfigId id); -# } -# ``` -# """) - -# print(response) - -# if __name__ == "code_generate": -# response = code_agent_executor.run(""" -# 根据如下需求generate domain layer code: -# --- -# FeatureConfig用于配置某个Feature中控制前端展示效果的配置项 -# FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间 -# FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED) -# FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED -# 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作 -# 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作 -# 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作 -# --- -# """) -# print(response) \ No newline at end of file diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/models.py b/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/models.py deleted file mode 100644 index 5e4b2e72383efaee1fae4f5c42e3db2c627e4190..0000000000000000000000000000000000000000 --- a/spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/models.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - 
- -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, 
reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 
threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = 
add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - 
upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = 
hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = 
upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - 
spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/single_thread_env.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/single_thread_env.py deleted file mode 100644 index 849219afd2cddec2ec6d489f12f60a34994bfb80..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/single_thread_env.py +++ /dev/null @@ -1,5 +0,0 @@ -import os - -os.environ["OMP_NUM_THREADS"] = "1" -os.environ['TF_NUM_INTEROP_THREADS'] = '1' -os.environ['TF_NUM_INTRAOP_THREADS'] = '1' diff --git a/spaces/AIWaves/Debate/src/agents/Component/ToolComponent.py b/spaces/AIWaves/Debate/src/agents/Component/ToolComponent.py deleted file mode 100644 index 95da2abdb7e8b7b5283763587f23ecc29e8ec35f..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Debate/src/agents/Component/ToolComponent.py +++ /dev/null @@ -1,887 +0,0 @@ -from abc import abstractmethod -import uuid -from text2vec import semantic_search -from utils import ( - get_relevant_history, - load_knowledge_base_qa, - load_knowledge_base_UnstructuredFile, - get_embedding, - extract, -) -import json -from typing import Dict, List -import os -from googleapiclient.discovery import build -import requests -from selenium import webdriver -from selenium.webdriver.common.by import By -from selenium.webdriver.support.ui import WebDriverWait -from selenium.webdriver.support import expected_conditions as EC -from bs4 import BeautifulSoup -import base64 -import re -from datetime import datetime, timedelta -from typing import Tuple, List, Any, Dict -from email.mime.text import MIMEText -from email.mime.multipart import MIMEMultipart -from google.auth.transport.requests import 
Request -from google.oauth2.credentials import Credentials -from google_auth_oauthlib.flow import InstalledAppFlow -from googleapiclient.discovery import build -from googleapiclient.errors import HttpError -from tqdm import tqdm - -class ToolComponent: - def __init__(self): - pass - - @abstractmethod - def func(self): - pass - -class KnowledgeBaseComponent(ToolComponent): - """ - Inject knowledge base - top_k : Top_k with the highest matching degree - type : "QA" or others - knowledge_base(json_path) : knowledge_base_path - """ - def __init__(self, top_k, type, knowledge_base): - super().__init__() - self.top_k = top_k - self.type = type - self.knowledge_base = knowledge_base - - if self.type == "QA": - ( - self.kb_embeddings, - self.kb_questions, - self.kb_answers, - self.kb_chunks, - ) = load_knowledge_base_qa(self.knowledge_base) - else: - self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile( - self.knowledge_base - ) - - def func(self, agent): - query = ( - agent.long_term_memory[-1]["content"] - if len(agent.long_term_memory) > 0 - else "" - ) - knowledge = "" - query = extract(query, "query") - query_embedding = get_embedding(query) - hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50) - hits = hits[0] - temp = [] - if self.type == "QA": - for hit in hits: - matching_idx = hit["corpus_id"] - if self.kb_chunks[matching_idx] in temp: - pass - else: - knowledge = ( - knowledge - + f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n" - ) - temp.append(self.kb_answers[matching_idx]) - if len(temp) == 1: - break - print(hits[0]["score"]) - score = hits[0]["score"] - if score < 0.5: - return {"prompt": "No matching knowledge base"} - else: - return {"prompt": "The relevant content is: " + knowledge + "\n"} - else: - for hit in hits: - matching_idx = hit["corpus_id"] - if self.kb_chunks[matching_idx] in temp: - pass - else: - knowledge = knowledge + f"{self.kb_answers[matching_idx]}\n\n" - temp.append(self.kb_answers[matching_idx]) - if len(temp) == self.top_k: - break - print(hits[0]["score"]) - score = hits[0]["score"] - if score < 0.5: - return {"prompt": "No matching knowledge base"} - else: - print(knowledge) - return {"prompt": "The relevant content is: " + knowledge + "\n"} - - -class StaticComponent(ToolComponent): - "Return static response" - def __init__(self, output): - super().__init__() - self.output = output - - def func(self, agent): - outputdict = {"response": self.output} - return outputdict - - -class ExtractComponent(ToolComponent): - """ - Extract keywords based on the current scene and store them in the environment - extract_words(list) : Keywords to be extracted - system_prompt & last_prompt : Prompt to extract keywords - """ - def __init__( - self, - extract_words, - system_prompt, - last_prompt=None, - ): - super().__init__() - self.extract_words = extract_words - self.system_prompt = system_prompt - self.default_prompt = ( - "Please strictly adhere to the following format for outputting:\n" - ) - for extract_word in extract_words: - self.default_prompt += ( - f"<{extract_word}> the content you need to extract " - ) - self.last_prompt = last_prompt if last_prompt else self.default_prompt - - def func(self, agent): - response = agent.LLM.get_response( - agent.long_term_memory, - self.system_prompt, - self.last_prompt, - stream=False, - ) - for extract_word in self.extract_words: - key = extract(response, extract_word) - key = key if key else response - 
agent.environment.shared_memory[extract_word] = key - - return {} - - -"""Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)""" - - -class WebSearchComponent(ToolComponent): - """search engines""" - - __ENGINE_NAME__: List = ["google", "bing"] - - def __init__(self, engine_name: str, api: Dict): - """ - :param engine_name: The name of the search engine used - :param api: Pass in a dictionary, such as {"bing":"key1", "google":"key2", ...}, of course each value can also be a list, or more complicated - """ - super(WebSearchComponent, self).__init__() - """Determine whether the key and engine_name of the api are legal""" - - assert engine_name in WebSearchComponent.__ENGINE_NAME__ - for api_name in api: - assert api_name in WebSearchComponent.__ENGINE_NAME__ - - self.api = api - self.engine_name = engine_name - - self.search: Dict = {"bing": self._bing_search, "google": self._google_search} - - def _bing_search(self, query: str, **kwargs): - """Initialize search hyperparameters""" - subscription_key = self.api["bing"] - search_url = "https://api.bing.microsoft.com/v7.0/search" - headers = {"Ocp-Apim-Subscription-Key": subscription_key} - params = { - "q": query, - "textDecorations": True, - "textFormat": "HTML", - "count": 10, - } - """start searching""" - response = requests.get(search_url, headers=headers, params=params) - response.raise_for_status() - results = response.json()["webPages"]["value"] - """execute""" - metadata_results = [] - for result in results: - metadata_result = { - "snippet": result["snippet"], - "title": result["name"], - "link": result["url"], - } - metadata_results.append(metadata_result) - return {"meta data": metadata_results} - - def _google_search(self, query: str, **kwargs): - """Initialize search hyperparameters""" - api_key = self.api[self.engine_name]["api_key"] - cse_id = self.api[self.engine_name]["cse_id"] - service = build("customsearch", "v1", developerKey=api_key) - """start searching""" - results = ( - service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"] - ) - """execute""" - metadata_results = [] - for result in results: - metadata_result = { - "snippet": result["snippet"], - "title": result["title"], - "link": result["link"], - } - metadata_results.append(metadata_result) - return {"meta data": metadata_results} - - def func(self, agent, **kwargs) -> Dict: - query = ( - agent.long_term_memory[-1]["content"] - if len(agent.long_term_memory) > 0 - else " " - ) - response = agent.LLM.get_response( - None, - system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. 
Format the output as extracted keywords:\nConversation:\n{query}", - stream=False, - ) - response = extract(response, "keywords") - query = response if response else query - - search_results = self.search[self.engine_name](query=query, **kwargs) - information = "" - for i in search_results["meta data"][:5]: - information += i["snippet"] - return { - "prompt": "You can refer to the following information to reply:\n" - + information - } - - def convert_search_engine_to(self, engine_name): - assert engine_name in WebSearchComponent.__ENGINE_NAME__ - self.engine_name = engine_name - - -class WebCrawlComponent(ToolComponent): - """Open a single web page for crawling""" - - def __init__(self): - super(WebCrawlComponent, self).__init__() - - def func(self, agent_dict) -> Dict: - url = agent_dict["url"] - print(f"crawling {url} ......") - content = "" - """Crawling content from url may need to be carried out according to different websites, such as wiki, baidu, zhihu, etc.""" - driver = webdriver.Chrome() - try: - """open url""" - driver.get(url) - - """wait 20 second""" - wait = WebDriverWait(driver, 20) - wait.until(EC.presence_of_element_located((By.TAG_NAME, "body"))) - - """crawl code""" - page_source = driver.page_source - - """parse""" - soup = BeautifulSoup(page_source, "html.parser") - - """concatenate""" - for paragraph in soup.find_all("p"): - content = f"{content}\n{paragraph.get_text()}" - except Exception as e: - print("Error:", e) - finally: - """quit""" - driver.quit() - return {"content": content.strip()} - - -class MailComponent(ToolComponent): - __VALID_ACTION__ = ["read", "send"] - - def __init__( - self, cfg_file: str, default_action: str = "read", name: str = "e-mail" - ): - """'../config/google_mail.json'""" - super(MailComponent, self).__init__(name) - self.name = name - assert ( - default_action.lower() in self.__VALID_ACTION__ - ), f"Action `{default_action}` is not allowed! 
The valid action is in `{self.__VALID_ACTION__}`" - self.action = default_action.lower() - self.credential = self._login(cfg_file) - - def _login(self, cfg_file: str): - SCOPES = [ - "https://www.googleapis.com/auth/gmail.readonly", - "https://www.googleapis.com/auth/gmail.send", - ] - creds = None - if os.path.exists("token.json"): - print("Login Successfully!") - creds = Credentials.from_authorized_user_file("token.json", SCOPES) - if not creds or not creds.valid: - print("Please authorize in an open browser.") - if creds and creds.expired and creds.refresh_token: - creds.refresh(Request()) - else: - flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES) - creds = flow.run_local_server(port=0) - # Save the credentials for the next run - with open("token.json", "w") as token: - token.write(creds.to_json()) - return creds - - def _read(self, mail_dict: dict): - credential = self.credential - state = mail_dict["state"] if "state" in mail_dict else None - time_between = ( - mail_dict["time_between"] if "time_between" in mail_dict else None - ) - sender_mail = mail_dict["sender_mail"] if "sender_mail" in mail_dict else None - only_both = mail_dict["only_both"] if "only_both" in mail_dict else False - order_by_time = ( - mail_dict["order_by_time"] if "order_by_time" in mail_dict else "descend" - ) - include_word = ( - mail_dict["include_word"] if "include_word" in mail_dict else None - ) - exclude_word = ( - mail_dict["exclude_word"] if "exclude_word" in mail_dict else None - ) - MAX_SEARCH_CNT = ( - mail_dict["MAX_SEARCH_CNT"] if "MAX_SEARCH_CNT" in mail_dict else 50 - ) - number = mail_dict["number"] if "number" in mail_dict else 10 - if state is None: - state = "all" - if time_between is not None: - assert isinstance(time_between, tuple) - assert len(time_between) == 2 - assert state in ["all", "unread", "read", "sent"] - if only_both: - assert sender_mail is not None - if sender_mail is not None: - assert isinstance(sender_mail, str) - assert credential - assert order_by_time in ["descend", "ascend"] - - def generate_query(): - query = "" - if state in ["unread", "read"]: - query = f"is:{state}" - if state in ["sent"]: - query = f"in:{state}" - if only_both: - query = f"{query} from:{sender_mail} OR to:{sender_mail}" - if sender_mail is not None and not only_both: - query = f"{query} from:({sender_mail})" - if include_word is not None: - query = f"{query} {include_word}" - if exclude_word is not None: - query = f"{query} -{exclude_word}" - if time_between is not None: - TIME_FORMAT = "%Y/%m/%d" - t1, t2 = time_between - if t1 == "now": - t1 = datetime.now().strftime(TIME_FORMAT) - if t2 == "now": - t2 = datetime.now().strftime(TIME_FORMAT) - if isinstance(t1, str) and isinstance(t2, str): - t1 = datetime.strptime(t1, TIME_FORMAT) - t2 = datetime.strptime(t2, TIME_FORMAT) - elif isinstance(t1, str) and isinstance(t2, int): - t1 = datetime.strptime(t1, TIME_FORMAT) - t2 = t1 + timedelta(days=t2) - elif isinstance(t1, int) and isinstance(t2, str): - t2 = datetime.strptime(t2, TIME_FORMAT) - t1 = t2 + timedelta(days=t1) - else: - assert False, "invalid time" - if t1 > t2: - t1, t2 = t2, t1 - query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}" - return query.strip() - - def sort_by_time(data: List[Dict]): - if order_by_time == "descend": - reverse = True - else: - reverse = False - sorted_data = sorted( - data, - key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"), - reverse=reverse, - ) - return sorted_data - - try: - service = 
build("gmail", "v1", credentials=credential) - results = ( - service.users() - .messages() - .list(userId="me", labelIds=["INBOX"], q=generate_query()) - .execute() - ) - - messages = results.get("messages", []) - email_data = list() - - if not messages: - print("No eligible emails.") - return None - else: - pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages))) - for cnt, message in enumerate(messages): - pbar.update(1) - if cnt >= MAX_SEARCH_CNT: - break - msg = ( - service.users() - .messages() - .get( - userId="me", - id=message["id"], - format="full", - metadataHeaders=None, - ) - .execute() - ) - - subject = "" - for header in msg["payload"]["headers"]: - if header["name"] == "Subject": - subject = header["value"] - break - - sender = "" - for header in msg["payload"]["headers"]: - if header["name"] == "From": - sender = re.findall( - r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"] - )[0] - break - body = "" - if "parts" in msg["payload"]: - for part in msg["payload"]["parts"]: - if part["mimeType"] == "text/plain": - data = part["body"]["data"] - body = base64.urlsafe_b64decode(data).decode("utf-8") - break - - email_info = { - "sender": sender, - "time": datetime.fromtimestamp( - int(msg["internalDate"]) / 1000 - ).strftime("%Y-%m-%d %H:%M:%S"), - "subject": subject, - "body": body, - } - email_data.append(email_info) - pbar.close() - email_data = sort_by_time(email_data)[0:number] - return {"results": email_data} - except Exception as e: - print(e) - return None - - def _send(self, mail_dict: dict): - recipient_mail = mail_dict["recipient_mail"] - subject = mail_dict["subject"] - body = mail_dict["body"] - credential = self.credential - service = build("gmail", "v1", credentials=credential) - - message = MIMEMultipart() - message["to"] = recipient_mail - message["subject"] = subject - - message.attach(MIMEText(body, "plain")) - - raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8") - try: - message = ( - service.users() - .messages() - .send(userId="me", body={"raw": raw_message}) - .execute() - ) - return {"state": True} - except HttpError as error: - print(error) - return {"state": False} - - def func(self, mail_dict: dict): - if "action" in mail_dict: - assert mail_dict["action"].lower() in self.__VALID_ACTION__ - self.action = mail_dict["action"] - functions = {"read": self._read, "send": self._send} - return functions[self.action](mail_dict) - - def convert_action_to(self, action_name: str): - assert ( - action_name.lower() in self.__VALID_ACTION__ - ), f"Action `{action_name}` is not allowed! 
The valid action is in `{self.__VALID_ACTION__}`" - self.action = action_name.lower() - - -class WeatherComponet(ToolComponent): - def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"): - super(WeatherComponet, self).__init__(name) - self.name = name - self.TIME_FORMAT = TIME_FORMAT - self.api_key = api_key - - def _parse(self, data): - dict_data: dict = {} - for item in data["data"]: - date = item["datetime"] - dict_data[date] = {} - if "weather" in item: - dict_data[date]["description"] = item["weather"]["description"] - mapping = { - "temp": "temperature", - "max_temp": "max_temperature", - "min_temp": "min_temperature", - "precip": "accumulated_precipitation", - } - for key in ["temp", "max_temp", "min_temp", "precip"]: - if key in item: - dict_data[date][mapping[key]] = item[key] - return dict_data - - def _query(self, city_name, country_code, start_date, end_date): - """https://www.weatherbit.io/api/historical-weather-daily""" - # print(datetime.strftime(start_date, self.TIME_FORMAT), datetime.strftime(datetime.now(), self.TIME_FORMAT), end_date, datetime.strftime(datetime.now()+timedelta(days=1), self.TIME_FORMAT)) - if start_date == datetime.strftime( - datetime.now(), self.TIME_FORMAT - ) and end_date == datetime.strftime( - datetime.now() + timedelta(days=1), self.TIME_FORMAT - ): - """today""" - url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}" - else: - url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}" - response = requests.get(url) - data = response.json() - return self._parse(data) - - def func(self, weather_dict: Dict) -> Dict: - TIME_FORMAT = self.TIME_FORMAT - # Beijing, Shanghai - city_name = weather_dict["city_name"] - # CN, US - country_code = weather_dict["country_code"] - # 2020-02-02 - start_date = datetime.strftime( - datetime.strptime(weather_dict["start_date"], self.TIME_FORMAT), - self.TIME_FORMAT, - ) - end_date = weather_dict["end_date"] if "end_date" in weather_dict else None - if end_date is None: - end_date = datetime.strftime( - datetime.strptime(start_date, TIME_FORMAT) + timedelta(days=-1), - TIME_FORMAT, - ) - else: - end_date = datetime.strftime( - datetime.strptime(weather_dict["end_date"], self.TIME_FORMAT), - self.TIME_FORMAT, - ) - if datetime.strptime(start_date, TIME_FORMAT) > datetime.strptime( - end_date, TIME_FORMAT - ): - start_date, end_date = end_date, start_date - assert start_date != end_date - return self._query(city_name, country_code, start_date, end_date) - - -class TranslateComponent(ToolComponent): - __SUPPORT_LANGUAGE__ = [ - "af", - "am", - "ar", - "as", - "az", - "ba", - "bg", - "bn", - "bo", - "bs", - "ca", - "cs", - "cy", - "da", - "de", - "dsb", - "dv", - "el", - "en", - "es", - "et", - "eu", - "fa", - "fi", - "fil", - "fj", - "fo", - "fr", - "fr-CA", - "ga", - "gl", - "gom", - "gu", - "ha", - "he", - "hi", - "hr", - "hsb", - "ht", - "hu", - "hy", - "id", - "ig", - "ikt", - "is", - "it", - "iu", - "iu-Latn", - "ja", - "ka", - "kk", - "km", - "kmr", - "kn", - "ko", - "ku", - "ky", - "ln", - "lo", - "lt", - "lug", - "lv", - "lzh", - "mai", - "mg", - "mi", - "mk", - "ml", - "mn-Cyrl", - "mn-Mong", - "mr", - "ms", - "mt", - "mww", - "my", - "nb", - "ne", - "nl", - "nso", - "nya", - "or", - "otq", - "pa", - "pl", - "prs", - "ps", - "pt", - "pt-PT", - "ro", - "ru", - "run", - "rw", - "sd", - "si", - "sk", - "sl", - "sm", - "sn", - "so", - "sq", - "sr-Cyrl", 
- "sr-Latn", - "st", - "sv", - "sw", - "ta", - "te", - "th", - "ti", - "tk", - "tlh-Latn", - "tlh-Piqd", - "tn", - "to", - "tr", - "tt", - "ty", - "ug", - "uk", - "ur", - "uz", - "vi", - "xh", - "yo", - "yua", - "yue", - "zh-Hans", - "zh-Hant", - "zu", - ] - - def __init__( - self, api_key, location, default_target_language="zh-cn", name="translate" - ): - super(TranslateComponent, self).__init__(name) - self.name = name - self.api_key = api_key - self.location = location - self.default_target_language = default_target_language - - def func(self, translate_dict: Dict) -> Dict: - content = translate_dict["content"] - target_language = self.default_target_language - if "target_language" in translate_dict: - target_language = translate_dict["target_language"] - assert ( - target_language in self.__SUPPORT_LANGUAGE__ - ), f"language `{target_language}` is not supported." - - endpoint = "https://api.cognitive.microsofttranslator.com" - - path = "/translate" - constructed_url = endpoint + path - - params = {"api-version": "3.0", "to": target_language} - - headers = { - "Ocp-Apim-Subscription-Key": self.api_key, - "Ocp-Apim-Subscription-Region": self.location, - "Content-type": "application/json", - "X-ClientTraceId": str(uuid.uuid4()), - } - - body = [{"text": content}] - - request = requests.post( - constructed_url, params=params, headers=headers, json=body - ) - response = request.json() - response = json.dumps( - response, - sort_keys=True, - ensure_ascii=False, - indent=4, - separators=(",", ": "), - ) - response = eval(response) - return {"result": response[0]["translations"][0]["text"]} - - -class APIComponent(ToolComponent): - def __init__(self): - super(APIComponent, self).__init__() - - def func(self, agent) -> Dict: - pass - - -class FunctionComponent(ToolComponent): - def __init__( - self, - functions, - function_call="auto", - response_type="response", - your_function=None, - ): - super().__init__() - self.functions = functions - self.function_call = function_call - self.parameters = {} - self.available_functions = {} - self.response_type = response_type - if your_function: - function_name = your_function["name"] - function_content = your_function["content"] - exec(function_content) - self.available_functions[function_name] = eval(function_name) - - for function in self.functions: - self.parameters[function["name"]] = list( - function["parameters"]["properties"].keys() - ) - self.available_functions[function["name"]] = eval(function["name"]) - - def func(self, agent): - messages = agent.long_term_memory - outputdict = {} - query = agent.long_term_memory[-1].content if len(agent.long_term_memory) > 0 else " " - relevant_history = get_relevant_history( - query, - agent.long_term_memory[:-1], - agent.chat_embeddings[:-1], - ) - response = agent.LLM.get_response( - messages, - None, - functions=self.functions, - stream=False, - function_call=self.function_call, - relevant_history=relevant_history, - ) - response_message = response - if response_message.get("function_call"): - function_name = response_message["function_call"]["name"] - fuction_to_call = self.available_functions[function_name] - function_args = json.loads(response_message["function_call"]["arguments"]) - input_args = {} - for args_name in self.parameters[function_name]: - input_args[args_name] = function_args.get(args_name) - function_response = fuction_to_call(**input_args) - if self.response_type == "response": - outputdict["response"] = function_response - elif self.response_type == "prompt": - outputdict["prompt"] = 
function_response - - return outputdict - - -class CodeComponent(ToolComponent): - def __init__(self, file_name, keyword) -> None: - super().__init__() - self.file_name = file_name - self.keyword = keyword - self.system_prompt = ( - "you need to extract the modified code as completely as possible." - ) - self.last_prompt = ( - f"Please strictly adhere to the following format for outputting: \n" - ) - self.last_prompt += ( - f"<{self.keyword}> the content you need to extract " - ) - - def func(self, agent): - response = agent.LLM.get_response( - agent.long_term_memory, - self.system_prompt, - self.last_prompt, - stream=False, - ) - code = extract(response, self.keyword) - code = code if code else response - os.makedirs("output_code", exist_ok=True) - file_name = "output_code/" + self.file_name - codes = code.split("\n") - if codes[0] == "```python": - codes.remove(codes[0]) - if codes[-1] == "```": - codes.remove(codes[-1]) - code = "\n".join(codes) - with open(file_name, "w", encoding="utf-8") as f: - f.write(code) - return {} diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/hashConv.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/hashConv.ts deleted file mode 100644 index de014324f6f21fbb67a61d098844027cfcdad0bf..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/hashConv.ts +++ /dev/null @@ -1,12 +0,0 @@ -import type { Conversation } from "$lib/types/Conversation"; -import { sha256 } from "./sha256"; - -export async function hashConv(conv: Conversation) { - // messages contains the conversation message but only the immutable part - const messages = conv.messages.map((message) => { - return (({ from, id, content, webSearchId }) => ({ from, id, content, webSearchId }))(message); - }); - - const hash = await sha256(JSON.stringify(messages)); - return hash; -} diff --git a/spaces/AchyuthGamer/OpenGPT/client/css/main.css b/spaces/AchyuthGamer/OpenGPT/client/css/main.css deleted file mode 100644 index ec1f1dd80247747912e1976413a1e3897f1308db..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/client/css/main.css +++ /dev/null @@ -1,14 +0,0 @@ -.main-container { - display: flex; - padding: var(--section-gap); - height: 100vh; - justify-content: center; - box-sizing: border-box; -} - -@media screen and (max-width: 360px) { - .main-container { - padding: 0px; - height: 90vh; - } -} \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDuo.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDuo.py deleted file mode 100644 index 119ff16b694866b52e0052e1710b4a9c530ef100..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDuo.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import annotations - -from curl_cffi.requests import AsyncSession -from .base_provider import AsyncProvider, format_prompt - - -class ChatgptDuo(AsyncProvider): - url = "https://chatgptduo.com" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async( - cls, - model: str, - messages: 
list[dict[str, str]], - proxy: str = None, - timeout: int = 30, - **kwargs - ) -> str: - async with AsyncSession( - impersonate="chrome107", - proxies={"https": proxy}, - timeout=timeout - ) as session: - prompt = format_prompt(messages), - data = { - "prompt": prompt, - "search": prompt, - "purpose": "ask", - } - response = await session.post(f"{cls.url}/", data=data) - response.raise_for_status() - data = response.json() - - cls._sources = [{ - "title": source["title"], - "url": source["link"], - "snippet": source["snippet"] - } for source in data["results"]] - - return data["answer"] - - @classmethod - def get_sources(cls): - return cls._sources - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetShownChildrenMethods.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetShownChildrenMethods.js deleted file mode 100644 index 81af7c1607d6e79068b6be63edb16b1397e2e8f6..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetShownChildrenMethods.js +++ /dev/null @@ -1,43 +0,0 @@ -export default { - getShownChildren(out) { - if (out === undefined) { - out = []; - } - var children = this.children, - child; - for (var i = 0, cnt = children.length; i < cnt; i++) { - child = children[i]; - if (child.rexSizer && child.rexSizer.hidden) { // Don't add hidden child - continue; - } - - out.push(child); - } - - return out; - }, - - getAllShownChildren(out) { - if (out === undefined) { - out = []; - } - - var queue = [this]; - while (queue.length > 0) { - var current = queue.shift(); - if (current.rexSizer && current.rexSizer.hidden) { - continue; - } - - if (current !== this) { - out.push(current); - } - - if (current.isRexContainerLite) { - queue.push(...current.children); - } - } - - return out; - } -} \ No newline at end of file diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/text/cleaners.py b/spaces/Akmyradov/TurkmenTTSweSTT/vits/text/cleaners.py deleted file mode 100644 index 2658f667a7d59ca99a3e16ba0c157d2ab5d795eb..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/text/cleaners.py +++ /dev/null @@ -1,100 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -from phonemizer import phonemize - - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def expand_numbers(text): - return normalize_numbers(text) - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def basic_cleaners(text): - '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def transliteration_cleaners(text): - '''Pipeline for non-English text that transliterates to ASCII.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def english_cleaners(text): - '''Pipeline for English text, including abbreviation expansion.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = phonemize(text, language='en-us', backend='espeak', strip=True) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_cleaners2(text): - '''Pipeline for English text, including abbreviation expansion. + punctuation + stress''' - text = convert_to_ascii(text) - text = lowercase(text) - text = expand_abbreviations(text) - phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=True) - phonemes = collapse_whitespace(phonemes) - return phonemes diff --git a/spaces/Ameaou/academic-chatgpt3.1/colorful.py b/spaces/Ameaou/academic-chatgpt3.1/colorful.py deleted file mode 100644 index d90972bb30a8f8fb932abbc34232e474df4d5205..0000000000000000000000000000000000000000 --- a/spaces/Ameaou/academic-chatgpt3.1/colorful.py +++ /dev/null @@ -1,91 +0,0 @@ -import platform -from sys import stdout - -if platform.system()=="Linux": - pass -else: - from colorama import init - init() - -# Do you like the elegance of Chinese characters? 
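# The print helpers below wrap ANSI SGR escape sequences: "\033[0;3Xm" selects a
# regular foreground colour (31 red, 32 green, 33 yellow, 34 blue, 35 magenta,
# 36 cyan), "\033[1;3Xm" the bold/bright variant, and "\033[0m" resets the
# terminal. A minimal sketch of what each helper amounts to (illustrative only):
def _example_red(*args):
    print("\033[0;31m", *args, "\033[0m")  # print the message in red, then reset attributes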
-def print红(*kw,**kargs): - print("\033[0;31m",*kw,"\033[0m",**kargs) -def print绿(*kw,**kargs): - print("\033[0;32m",*kw,"\033[0m",**kargs) -def print黄(*kw,**kargs): - print("\033[0;33m",*kw,"\033[0m",**kargs) -def print蓝(*kw,**kargs): - print("\033[0;34m",*kw,"\033[0m",**kargs) -def print紫(*kw,**kargs): - print("\033[0;35m",*kw,"\033[0m",**kargs) -def print靛(*kw,**kargs): - print("\033[0;36m",*kw,"\033[0m",**kargs) - -def print亮红(*kw,**kargs): - print("\033[1;31m",*kw,"\033[0m",**kargs) -def print亮绿(*kw,**kargs): - print("\033[1;32m",*kw,"\033[0m",**kargs) -def print亮黄(*kw,**kargs): - print("\033[1;33m",*kw,"\033[0m",**kargs) -def print亮蓝(*kw,**kargs): - print("\033[1;34m",*kw,"\033[0m",**kargs) -def print亮紫(*kw,**kargs): - print("\033[1;35m",*kw,"\033[0m",**kargs) -def print亮靛(*kw,**kargs): - print("\033[1;36m",*kw,"\033[0m",**kargs) - - - -def print亮红(*kw,**kargs): - print("\033[1;31m",*kw,"\033[0m",**kargs) -def print亮绿(*kw,**kargs): - print("\033[1;32m",*kw,"\033[0m",**kargs) -def print亮黄(*kw,**kargs): - print("\033[1;33m",*kw,"\033[0m",**kargs) -def print亮蓝(*kw,**kargs): - print("\033[1;34m",*kw,"\033[0m",**kargs) -def print亮紫(*kw,**kargs): - print("\033[1;35m",*kw,"\033[0m",**kargs) -def print亮靛(*kw,**kargs): - print("\033[1;36m",*kw,"\033[0m",**kargs) - -print_red = print红 -print_green = print绿 -print_yellow = print黄 -print_blue = print蓝 -print_purple = print紫 -print_indigo = print靛 - -print_bold_red = print亮红 -print_bold_green = print亮绿 -print_bold_yellow = print亮黄 -print_bold_blue = print亮蓝 -print_bold_purple = print亮紫 -print_bold_indigo = print亮靛 - -if not stdout.isatty(): - # redirection, avoid a fucked up log file - print红 = print - print绿 = print - print黄 = print - print蓝 = print - print紫 = print - print靛 = print - print亮红 = print - print亮绿 = print - print亮黄 = print - print亮蓝 = print - print亮紫 = print - print亮靛 = print - print_red = print - print_green = print - print_yellow = print - print_blue = print - print_purple = print - print_indigo = print - print_bold_red = print - print_bold_green = print - print_bold_yellow = print - print_bold_blue = print - print_bold_purple = print - print_bold_indigo = print \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/installation.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/installation.md deleted file mode 100644 index 8cd3ad97cc21c658298d755505d9a70ed41e190a..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/installation.md +++ /dev/null @@ -1,146 +0,0 @@ - - -# 安装 - -在你正在使用的任意深度学习框架中安装 🤗 Diffusers 。 - -🤗 Diffusers已在Python 3.7+、PyTorch 1.7.0+和Flax上进行了测试。按照下面的安装说明,针对你正在使用的深度学习框架进行安装: - -- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions. -- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions. - -## 使用pip安装 - -你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Diffusers 。 - -如果你对 Python 虚拟环境不熟悉,可以看看这个[教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). 
- -在虚拟环境中,你可以轻松管理不同的项目,避免依赖项之间的兼容性问题。 - -首先,在你的项目目录下创建一个虚拟环境: - -```bash -python -m venv .env -``` - -激活虚拟环境: - -```bash -source .env/bin/activate -``` - -现在,你就可以安装 🤗 Diffusers了!使用下边这个命令: - -**PyTorch** - -```bash -pip install diffusers["torch"] -``` - -**Flax** - -```bash -pip install diffusers["flax"] -``` - -## 从源代码安装 - -在从源代码安装 `diffusers` 之前,确保你已经安装了 `torch` 和 `accelerate`。 - -`torch`的安装教程可以看 `torch` [文档](https://pytorch.org/get-started/locally/#start-locally). - -安装 `accelerate` - -```bash -pip install accelerate -``` - -从源码安装 🤗 Diffusers 需要使用以下命令: - -```bash -pip install git+https://github.com/huggingface/diffusers -``` - -这个命令安装的是最新的 `main`版本,而不是最近的`stable`版。 -`main`是一直和最新进展保持一致的。比如,上次发布的正式版中有bug,在`main`中可以看到这个bug被修复了,但是新的正式版此时尚未推出。 -但是这也意味着 `main`版本不保证是稳定的。 - -我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决 - -如果你遇到了问题,可以提 [Issue](https://github.com/huggingface/transformers/issues),这样我们就能更快修复问题了。 - -## 可修改安装 - -如果你想做以下两件事,那你可能需要一个可修改代码的安装方式: - -* 使用 `main`版本的源代码。 -* 为 🤗 Diffusers 贡献,需要测试代码中的变化。 - -使用以下命令克隆并安装 🤗 Diffusers: - -```bash -git clone https://github.com/huggingface/diffusers.git -cd diffusers -``` - -**PyTorch** - -``` -pip install -e ".[torch]" -``` - -**Flax** - -``` -pip install -e ".[flax]" -``` - -这些命令将连接到你克隆的版本库和你的 Python 库路径。 -现在,不只是在通常的库路径,Python 还会在你克隆的文件夹内寻找包。 -例如,如果你的 Python 包通常安装在 `~/anaconda3/envs/main/lib/python3.7/Site-packages/`,Python 也会搜索你克隆到的文件夹。`~/diffusers/`。 - - - -如果你想继续使用这个库,你必须保留 `diffusers` 文件夹。 - - - - -现在你可以用下面的命令轻松地将你克隆的 🤗 Diffusers 库更新到最新版本。 - -```bash -cd ~/diffusers/ -git pull -``` - -你的Python环境将在下次运行时找到`main`版本的 🤗 Diffusers。 - -## 注意 Telemetry 日志 - -我们的库会在使用`from_pretrained()`请求期间收集 telemetry 信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道类,以及预训练检查点的路径(如果它被托管在Hub上的话)。 -这些使用数据有助于我们调试问题并确定新功能的开发优先级。 -Telemetry 数据仅在从 HuggingFace Hub 中加载模型和管道时发送,而不会在本地使用期间收集。 - -我们知道,并不是每个人都想分享这些的信息,我们尊重您的隐私, -因此您可以通过在终端中设置 `DISABLE_TELEMETRY` 环境变量从而禁用 Telemetry 数据收集: - - -Linux/MacOS : -```bash -export DISABLE_TELEMETRY=YES -``` - -Windows : -```bash -set DISABLE_TELEMETRY=YES -``` \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py deleted file mode 100644 index 162bac1c4331149c4b5abde1eadd8013ab0cda99..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py +++ /dev/null @@ -1,62 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. 
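-# These placeholder classes mirror the real Flax + Transformers pipelines so that
-# importing diffusers still works when those optional backends are not installed:
-# instantiating one of them, or calling its from_config/from_pretrained classmethods,
-# goes through requires_backends, which raises an ImportError naming the extra
-# packages the user needs to install.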
-from ..utils import DummyObject, requires_backends - - -class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject): - _backends = ["flax", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - -class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): - _backends = ["flax", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - -class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject): - _backends = ["flax", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - -class FlaxStableDiffusionPipeline(metaclass=DummyObject): - _backends = ["flax", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/hub_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/hub_utils.py deleted file mode 100644 index 4f0cf00a5c5d0d303ba53f62fbf027c0bc31ad49..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/hub_utils.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import os -import re -import sys -import traceback -import warnings -from pathlib import Path -from typing import Dict, Optional, Union -from uuid import uuid4 - -from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami -from huggingface_hub.file_download import REGEX_COMMIT_HASH -from huggingface_hub.utils import ( - EntryNotFoundError, - RepositoryNotFoundError, - RevisionNotFoundError, - is_jinja_available, -) -from packaging import version -from requests import HTTPError - -from .. 
import __version__ -from .constants import ( - DEPRECATED_REVISION_ARGS, - DIFFUSERS_CACHE, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, - SAFETENSORS_WEIGHTS_NAME, - WEIGHTS_NAME, -) -from .import_utils import ( - ENV_VARS_TRUE_VALUES, - _flax_version, - _jax_version, - _onnxruntime_version, - _torch_version, - is_flax_available, - is_onnx_available, - is_torch_available, -) -from .logging import get_logger - - -logger = get_logger(__name__) - - -MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" -SESSION_ID = uuid4().hex -HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES -DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES -HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" - - -def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: - """ - Formats a user-agent string with basic info about a request. - """ - ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" - if DISABLE_TELEMETRY or HF_HUB_OFFLINE: - return ua + "; telemetry/off" - if is_torch_available(): - ua += f"; torch/{_torch_version}" - if is_flax_available(): - ua += f"; jax/{_jax_version}" - ua += f"; flax/{_flax_version}" - if is_onnx_available(): - ua += f"; onnxruntime/{_onnxruntime_version}" - # CI will set this value to True - if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: - ua += "; is_ci/true" - if isinstance(user_agent, dict): - ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) - elif isinstance(user_agent, str): - ua += "; " + user_agent - return ua - - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - - -def create_model_card(args, model_name): - if not is_jinja_available(): - raise ValueError( - "Modelcard rendering is based on Jinja templates." - " Please make sure to have `jinja` installed before using `create_model_card`." - " To install it, please run `pip install Jinja2`." 
- ) - - if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]: - return - - hub_token = args.hub_token if hasattr(args, "hub_token") else None - repo_name = get_full_repo_name(model_name, token=hub_token) - - model_card = ModelCard.from_template( - card_data=ModelCardData( # Card metadata object that will be converted to YAML block - language="en", - license="apache-2.0", - library_name="diffusers", - tags=[], - datasets=args.dataset_name, - metrics=[], - ), - template_path=MODEL_CARD_TEMPLATE_PATH, - model_name=model_name, - repo_name=repo_name, - dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, - learning_rate=args.learning_rate, - train_batch_size=args.train_batch_size, - eval_batch_size=args.eval_batch_size, - gradient_accumulation_steps=( - args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None - ), - adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, - adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, - adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, - adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, - lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, - lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, - ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, - ema_power=args.ema_power if hasattr(args, "ema_power") else None, - ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, - mixed_precision=args.mixed_precision, - ) - - card_path = os.path.join(args.output_dir, "README.md") - model_card.save(card_path) - - -def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): - """ - Extracts the commit hash from a resolved filename toward a cache file. - """ - if resolved_file is None or commit_hash is not None: - return commit_hash - resolved_file = str(Path(resolved_file).as_posix()) - search = re.search(r"snapshots/([^/]+)/", resolved_file) - if search is None: - return None - commit_hash = search.groups()[0] - return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None - - -# Old default cache path, potentially to be migrated. -# This logic was more or less taken from `transformers`, with the following differences: -# - Diffusers doesn't use custom environment variables to specify the cache path. -# - There is no need to migrate the cache format, just move the files to the new location. -hf_cache_home = os.path.expanduser( - os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) -) -old_diffusers_cache = os.path.join(hf_cache_home, "diffusers") - - -def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None: - if new_cache_dir is None: - new_cache_dir = DIFFUSERS_CACHE - if old_cache_dir is None: - old_cache_dir = old_diffusers_cache - - old_cache_dir = Path(old_cache_dir).expanduser() - new_cache_dir = Path(new_cache_dir).expanduser() - for old_blob_path in old_cache_dir.glob("**/blobs/*"): - if old_blob_path.is_file() and not old_blob_path.is_symlink(): - new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir) - new_blob_path.parent.mkdir(parents=True, exist_ok=True) - os.replace(old_blob_path, new_blob_path) - try: - os.symlink(new_blob_path, old_blob_path) - except OSError: - logger.warning( - "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." - ) - # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). - - -cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") -if not os.path.isfile(cache_version_file): - cache_version = 0 -else: - with open(cache_version_file) as f: - try: - cache_version = int(f.read()) - except ValueError: - cache_version = 0 - -if cache_version < 1: - old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 - if old_cache_is_not_empty: - logger.warning( - "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " - "existing cached models. This is a one-time operation, you can interrupt it or run it " - "later by calling `diffusers.utils.hub_utils.move_cache()`." - ) - try: - move_cache() - except Exception as e: - trace = "\n".join(traceback.format_tb(e.__traceback__)) - logger.error( - f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " - "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " - "message and we will do our best to help." - ) - -if cache_version < 1: - try: - os.makedirs(DIFFUSERS_CACHE, exist_ok=True) - with open(cache_version_file, "w") as f: - f.write("1") - except Exception: - logger.warning( - f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " - "the directory exists and can be written to." - ) - - -def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: - if variant is not None: - splits = weights_name.split(".") - splits = splits[:-1] + [variant] + splits[-1:] - weights_name = ".".join(splits) - - return weights_name - - -def _get_model_file( - pretrained_model_name_or_path, - *, - weights_name, - subfolder, - cache_dir, - force_download, - proxies, - resume_download, - local_files_only, - use_auth_token, - user_agent, - revision, - commit_hash=None, -): - pretrained_model_name_or_path = str(pretrained_model_name_or_path) - if os.path.isfile(pretrained_model_name_or_path): - return pretrained_model_name_or_path - elif os.path.isdir(pretrained_model_name_or_path): - if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): - # Load from a PyTorch checkpoint - model_file = os.path.join(pretrained_model_name_or_path, weights_name) - return model_file - elif subfolder is not None and os.path.isfile( - os.path.join(pretrained_model_name_or_path, subfolder, weights_name) - ): - model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) - return model_file - else: - raise EnvironmentError( - f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." - ) - else: - # 1. 
First check if deprecated way of loading from branches is used - if ( - revision in DEPRECATED_REVISION_ARGS - and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) - and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0") - ): - try: - model_file = hf_hub_download( - pretrained_model_name_or_path, - filename=_add_variant(weights_name, revision), - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - subfolder=subfolder, - revision=revision or commit_hash, - ) - warnings.warn( - f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", - FutureWarning, - ) - return model_file - except: # noqa: E722 - warnings.warn( - f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", - FutureWarning, - ) - try: - # 2. Load model file as usual - model_file = hf_hub_download( - pretrained_model_name_or_path, - filename=weights_name, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - subfolder=subfolder, - revision=revision or commit_hash, - ) - return model_file - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " - "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " - "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " - "login`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " - "this model name. Check the model page at " - f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." 
- ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" - f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" - f" directory containing a file named {weights_name} or" - " \nCheckout your internet connection or see how to run the library in" - " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a file named {weights_name}" - ) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index cfa14c99543382328b2cb4ac7c2d0dbb2a562017..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' -# learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 9d4dc7390370d0ffe21e7dcb686eeff7261952c4..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/group_points.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. 
- - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. - """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. - - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. - """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' 
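-            # No point features were supplied, so the grouped (and optionally
-            # normalized) relative xyz offsets themselves serve as the features.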
- new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. - """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. - """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. 
- """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/__init__.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/__init__.py deleted file mode 100644 index 332b242c03d1c5e80d4577df442a9a037b1816e1..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base_pixel_sampler import BasePixelSampler -from .ohem_pixel_sampler import OHEMPixelSampler - -__all__ = ['BasePixelSampler', 'OHEMPixelSampler'] diff --git a/spaces/Arnx/MusicGenXvAKN/CODE_OF_CONDUCT.md b/spaces/Arnx/MusicGenXvAKN/CODE_OF_CONDUCT.md deleted file mode 100644 index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic -address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a -professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. 
-Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/Artgor/digit-draw-detect/src/model_architecture.py b/spaces/Artgor/digit-draw-detect/src/model_architecture.py deleted file mode 100644 index 7cbdb3b3a33ef0d972fba33eaafa1c64c6dcc354..0000000000000000000000000000000000000000 --- a/spaces/Artgor/digit-draw-detect/src/model_architecture.py +++ /dev/null @@ -1,151 +0,0 @@ -import torch -import torch.nn as nn - - -class CNNBlock(nn.Module): - def __init__(self, in_channels, out_channels, bn_act=True, **kwargs): - super().__init__() - self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs) - self.bn = nn.BatchNorm2d(out_channels) - self.leaky = nn.LeakyReLU(0.1) - self.use_bn_act = bn_act - - def forward(self, x): - if self.use_bn_act: - return self.leaky(self.bn(self.conv(x))) - else: - return self.conv(x) - - -class ResidualBlock(nn.Module): - def __init__(self, channels, use_residual=True, num_repeats=1): - super().__init__() - self.layers = nn.ModuleList() - for _ in range(num_repeats): - self.layers += [ - nn.Sequential( - CNNBlock(channels, channels // 2, kernel_size=1), - CNNBlock(channels // 2, channels, kernel_size=3, padding=1), - ) - ] - - self.use_residual = use_residual - self.num_repeats = num_repeats - - def forward(self, x): - for layer in self.layers: - if self.use_residual: - x = x + layer(x) - else: - x = layer(x) - - return x - - -class ScalePrediction(nn.Module): - def __init__(self, in_channels, num_classes): - super().__init__() - self.pred = nn.Sequential( - CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1), - CNNBlock(2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1), - ) - self.num_classes = num_classes - - def forward(self, x): - return self.pred(x).reshape(x.shape[0], 3, self.num_classes + 5, x.shape[2], x.shape[3]).permute(0, 1, 3, 4, 2) - - -class Net(nn.Module): - def __init__(self): - super().__init__() - self.num_classes = 12 - self.in_channels = 3 - self.config = [ - (32, 3, 1), - (64, 3, 2), - ['B', 1], - (128, 3, 2), - ['B', 2], - (256, 3, 2), - ['B', 
8], - (512, 3, 2), - ['B', 8], - (1024, 3, 2), - ['B', 4], - (512, 1, 1), - (1024, 3, 1), - 'S', - (256, 1, 1), - 'U', - (256, 1, 1), - (512, 3, 1), - 'S', - (128, 1, 1), - 'U', - (128, 1, 1), - (256, 3, 1), - 'S', - ] - self.layers = self._create_conv_layers() - - def forward(self, x): - outputs = [] # for each scale - route_connections = [] - for layer in self.layers: - if isinstance(layer, ScalePrediction): - outputs.append(layer(x)) - continue - x = layer(x) - - if isinstance(layer, ResidualBlock) and layer.num_repeats == 8: - route_connections.append(x) - - elif isinstance(layer, nn.Upsample): - x = torch.cat([x, route_connections[-1]], dim=1) - route_connections.pop() - - return outputs - - def _create_conv_layers(self): - layers = nn.ModuleList() - in_channels = self.in_channels - - for module in self.config: - if isinstance(module, tuple): - out_channels, kernel_size, stride = module - layers.append( - CNNBlock( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=1 if kernel_size == 3 else 0, - ) - ) - in_channels = out_channels - - elif isinstance(module, list): - num_repeats = module[1] - layers.append( - ResidualBlock( - in_channels, - num_repeats=num_repeats, - ) - ) - - elif isinstance(module, str): - if module == 'S': - layers += [ - ResidualBlock(in_channels, use_residual=False, num_repeats=1), - CNNBlock(in_channels, in_channels // 2, kernel_size=1), - ScalePrediction(in_channels // 2, num_classes=self.num_classes), - ] - in_channels = in_channels // 2 - - elif module == 'U': - layers.append( - nn.Upsample(scale_factor=2), - ) - in_channels = in_channels * 3 - - return layers diff --git a/spaces/Artrajz/vits-simple-api/bert_vits2/text/cleaner.py b/spaces/Artrajz/vits-simple-api/bert_vits2/text/cleaner.py deleted file mode 100644 index d8bc51de202c049e7e375c2bd32b92c53f6f1c3c..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/bert_vits2/text/cleaner.py +++ /dev/null @@ -1,44 +0,0 @@ -import importlib -from bert_vits2.text import cleaned_text_to_sequence - -language_module_map = { - 'zh': "bert_vits2.text.chinese", - 'ja': "bert_vits2.text.japanese" -} - -_loaded_modules = {} - - -def get_language_module(language): - if language not in _loaded_modules: - module_path = language_module_map.get(language) - if not module_path: - raise ValueError(f"Unsupported language: {language}") - - _loaded_modules[language] = importlib.import_module(module_path) - - return _loaded_modules[language] - - -def clean_text(text, language): - language_module = get_language_module(language) - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - - -def clean_text_bert(text, language): - language_module = get_language_module(language) - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - - -if __name__ == '__main__': - pass diff --git a/spaces/Aspik101/Polish-vicuna-13b-v1.5/README.md b/spaces/Aspik101/Polish-vicuna-13b-v1.5/README.md deleted file mode 100644 index f9efbb2d2edcfae55f36af35370cf0c86be05bfd..0000000000000000000000000000000000000000 --- a/spaces/Aspik101/Polish-vicuna-13b-v1.5/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- 
-title: Polish vicuna-13b-v1.5-PL -emoji: 📚 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Benson/text-generation/Examples/Azcar Ablaikan Remix Indir.md b/spaces/Benson/text-generation/Examples/Azcar Ablaikan Remix Indir.md deleted file mode 100644 index a60d85832e61f01d53b551722bfc7e2ccd039999..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Azcar Ablaikan Remix Indir.md +++ /dev/null @@ -1,165 +0,0 @@ - -

        Sugar (Ablaikan Remix) de Zubi feat. Anatu: Una canción dulce y picante para condimentar tu lista de reproducción

        -

        Si estás buscando una canción que te haga sentir bien, bailar y cantar, entonces deberías echar un vistazo a Sugar (Ablaikan Remix) de Zubi feat. Anatu. Esta canción es un remix de la canción original Sugar de Zubi y Anatu, que fue lanzada en 2019. El remix fue hecho por Ablaikan, un productor y DJ turco, que añadió su propio toque y sabor a la canción. El resultado es una canción pegadiza, alegre y energética que te hará querer mover tu cuerpo y disfrutar de la vida.

        -

        azúcar ablaikan remix indir


        DOWNLOAD ::: https://bltlly.com/2v6KMO



        -

        En este artículo, te contaremos todo lo que necesitas saber sobre Sugar (Ablaikan Remix) de Zubi feat. Anatu, incluyendo lo que es, quiénes son los artistas detrás de él, cuáles son las letras y el significado de la misma, cuál es el género y el estilo de la misma, dónde se puede escuchar y descargarlo, y cómo disfrutarlo al máximo. Así que, sin más preámbulos, empecemos.

        -

        ¿Qué es Sugar (Ablaikan Remix) y quiénes son los artistas detrás de ella?

        -

        La canción original Sugar de Zubi y Anatu

        -

        Sugar es una canción de Zubi, un cantautor nigeriano con sede en Londres, Reino Unido, y Anatu, un cantautor británico con sede en Los Ángeles, Estados Unidos. La canción fue lanzada el 29 de octubre de 2019, como sencillo bajo Dojang Records. La canción es una fusión de Afrobeat, R&B, soul y música pop, con influencias de leyendas de la música nigeriana como Fela Kuti y King Sunny Ade. La canción trata sobre el amor, la pasión, el deseo y la dulzura, ya que las letras expresan cómo los cantantes se sienten acerca de sus amantes.

        -

        Zubi y Anatu se conocieron a través de Instagram en 2018, cuando Zubi se acercó a Anatu después de escuchar su voz en una de sus publicaciones. Decidieron colaborar en una canción juntos, que resultó ser Sugar. Grabaron la canción de forma remota, ya que estaban en diferentes países en el momento. También grabaron un video musical para la canción en Los Ángeles, que fue dirigida por Alex Di Marco.

        -

        El remix de Ablaikan y su popularidad

        - -

        Ablaikan lanzó su remix de Sugar el 29 de octubre de 2020, exactamente un año después de que se lanzara la canción original. Añadió su propio sonido característico a la canción, con más bajo, batería, sintetizadores, efectos y voces. También cambió el tempo de la canción de 90 BPM a 120 BPM, haciéndolo más bailable y enérgico. También añadió algunos elementos turcos y árabes a la canción, dándole un toque exótico y picante.

        -

        El remix se hizo muy popular en plataformas de redes sociales como TikTok, Instagram, YouTube y Spotify, donde ganó millones de vistas, me gusta, comentarios y transmisiones. Muchas personas usaron el remix para sus videos, bailes, desafíos y memes. El remix también recibió comentarios positivos de los artistas originales, Zubi y Anatu, que elogiaron a Ablaikan por su trabajo y creatividad.

        -

        -

        ¿Cuáles son las letras y el significado de Sugar (Ablaikan Remix)?

        -

        La letra de la canción y su interpretación

        -

        Las letras de Sugar (Ablaikan Remix) son las mismas que la canción original Sugar de Zubi y Anatu, excepto por algunos cambios menores en el coro y el puente. Las letras están escritas en inglés, con algunas palabras en yoruba, un idioma nigeriano. Aquí están las letras de la canción y su interpretación:

        -
        
        -
        - 

        Las letras de la canción son simples y directas, ya que expresan cómo los cantantes se sienten acerca de sus amantes. Usan palabras como azúcar, miel, dulces, chocolate, sabor y favorito para describir a sus amantes como algo dulce, delicioso e irresistible. También usan la frase "omo yen sweet gan" que significa "esa chica es tan dulce" en yoruba, para enfatizar su admiración y atracción por sus amantes. También dicen que no pueden tener suficiente de sus amantes, y quieren estar más cerca de ellos y sentir sus cuerpos en ellos.

        -

        El mensaje y el tema de la canción

        -

        El mensaje y el tema de la canción son sobre el amor, la pasión, el deseo y la dulzura. La canción celebra la alegría y el placer de estar enamorado de alguien que te hace feliz y satisfecho. La canción también anima a los oyentes a disfrutar de la vida y divertirse con sus amantes. La canción es una canción positiva y edificante que puede hacer que cualquiera se sienta bien y sonría.

        -

        ¿Cuál es el género y estilo de Sugar (Ablaikan Remix)?

        -

        El género de la canción y sus influencias

        -

        El género de Sugar (Ablaikan Remix) es una mezcla de deep house, casa oriental, casa étnica, música electrónica, Afrobeat, R&B, soul y música pop. La canción combina diferentes elementos musicales de diferentes culturas y regiones, creando un sonido único y diverso que atrae a un público amplio.

        -

        La canción está influenciada por varios géneros musicales y artistas, como:

        -
          -
        • Deep house: un subgénero de música house que se originó en la década de 1980 que cuenta con un sonido suave, conmovedor y atmosférico, con voces mínimas, líneas de bajo profundas y pads de sintetizador. Algunos de los artistas que popularizaron deep house son Frankie Knuckles, Larry Heard, Kerri Chandler y Marshall Jefferson.
        • - -
        • Casa étnica: un subgénero de música house que combina elementos de diversas tradiciones étnicas y folclóricas de todo el mundo, como la música africana, latina, india, balcánica, celta y asiática. Algunos de los artistas que son conocidos por casa étnica son Pascal Junior, Melih Aydogan, Hakan Akkus, Ahmet Kilic, y Costa Mee.
        • -
        • Música electrónica: un amplio género de música que utiliza instrumentos electrónicos, dispositivos y software para crear sonidos y ritmos. La música electrónica cubre una amplia gama de estilos y subgéneros, como techno, trance, electro, dubstep, EDM y más. Algunos de los artistas que son conocidos por la música electrónica son Daft Punk, The Chemical Brothers, Calvin Harris, David Guetta y Skrillex.
        • -
        • Afrobeat: un género de música que se originó en Nigeria en la década de 1970 que combina elementos de estilos musicales de África Occidental, como highlife, juju y fuji, con música jazz y funk estadounidense. Afrobeat se caracteriza por polirritmos complejos, secciones de cuernos, voces de llamada y respuesta, y mensajes políticos y sociales. Algunos de los artistas que son conocidos por Afrobeat son Fela Kuti, King Sunny Ade, Tony Allen, Antibalas y Seun Kuti.
        • -
        • R&B: un género de música que se originó en los Estados Unidos en la década de 1940 que combina elementos de rhythm and blues, soul, gospel, funk y música pop. R&B se caracteriza por voces suaves, melodías pegadizas, ritmos groovy y letras emocionales. Algunos de los artistas que son conocidos por R&B son Marvin Gaye, Aretha Franklin, Stevie Wonder, Beyoncé y The Weeknd.
        • -
        • Soul: un género de música que se originó en los Estados Unidos en los años 50 y 60 que combina elementos de la música gospel afroamericana, el rhythm and blues y el jazz. El alma se caracteriza por voces expresivas, armonías inspiradas en el evangelio y mensajes inspiradores. Algunos de los artistas que son conocidos por el soul son Ray Charles, Sam Cooke, Otis Redding, James Brown y Alicia Keys.
        • - -
        -

        El estilo de la canción y sus elementos

        -

        El estilo de Sugar (Ablaikan Remix) es una mezcla de diferentes elementos musicales que crean un sonido único y diverso que atrae a un público amplio. Algunos de los elementos que definen el estilo de la canción son:

        -
          -
        • Las voces: Las voces de Zubi y Anatu son suaves, conmovedoras y armoniosas. Cantan en inglés con algunas palabras en yoruba, añadiendo un toque de sabor africano a la canción. También usan algunos efectos vocales, como reverb, echo y distortion, para crear un sonido más atmosférico y dinámico.
        • -
        • El bajo: El bajo de la canción es profundo, potente y genial. Proporciona la base y el ritmo de la canción. También añade algo de energía y emoción a la canción.
        • -
        • Los tambores: Los tambores de la canción son crujientes, impactantes y variados. Utilizan diferentes sonidos de batería y patrones para crear un ritmo complejo e interesante. También usan algunos sonidos de percusión, como cocteleras, palmas, broches de presión y panderetas, para agregar textura y sabor a la canción.
        • -
        • Los sintetizadores: Los sintetizadores de la canción son brillantes, cálidos y melódicos. Utilizan diferentes sonidos de sintetizador y acordes para crear un sonido rico y colorido. También usan algunos arpegios, almohadillas, pinzas, y lleva a añadir algo de movimiento y variación a la canción.
        • -
        • Los efectos: Los efectos de la canción son sutiles, creativos y de buen gusto. Utilizan diferentes efectos para mejorar y modificar el sonido de la canción. También usan algunas transiciones, barridos, gotas y bandas para crear cierta tensión y liberar la canción.
        • -
        • Los elementos turcos y árabes: Los elementos turcos y árabes de la canción son distintivos, exóticos y picantes. Utilizan algunas escalas, instrumentos, voces y ritmos turcos y árabes para dar a la canción un sabor único y diverso. También usan algunas muestras, como "habibi", "yalla" y "mashallah", para agregar algunas referencias culturales y lingüísticas a la canción.
        • -
        - -

        Las plataformas de streaming que ofrecen la canción

        -

        Si quieres escuchar Sugar (Ablaikan Remix) de Zubi feat. Anatu, tienes muchas opciones entre las que elegir. La canción está disponible en varias plataformas de streaming, como:

        - -
| Plataforma | Enlace |
| --- | --- |
| Spotify | Sugar (Ablaikan Remix) |
| Música de YouTube | Sugar (Ablaikan Remix) |
| SoundCloud | Sugar (Ablaikan Remix) |
| Música de Amazon | Sugar (Ablaikan Remix) |
| Napster | Sugar (Ablaikan Remix) |
| Google Play | Sugar (Ablaikan Remix) |
| Bandcamp | Sugar (Ablaikan Remix) |
| DatPiff | |
| MP3Juices | Sugar (Ablaikan Remix) |
-

Estas plataformas te permiten descargar la canción en diferentes formatos, como MP3, WAV, FLAC y más. También puede elegir la calidad y el tamaño del archivo, dependiendo de sus preferencias y dispositivo. Algunas de estas plataformas pueden requerir que te registres, pagues o sigas algunos pasos antes de descargar la canción.

-

¿Cómo disfrutar al máximo de Sugar (Ablaikan Remix)?

-

Los mejores escenarios y estados de ánimo para escuchar la canción

-

Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede adaptarse a diferentes escenarios y estados de ánimo, dependiendo de su gusto y estado de ánimo. Sin embargo, algunos de los mejores escenarios y estados de ánimo para escuchar la canción son:

- -

Los consejos y trucos para mejorar tu experiencia auditiva

- - -

Conclusión

-

Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede hacerte sentir bien, bailar y cantar. Es un remix de la canción original Sugar de Zubi y Anatu, que fue lanzada en 2019. El remix fue hecho por Ablaikan, un productor y DJ turco, que añadió su propio toque y sabor a la canción. El resultado es una canción pegadiza, alegre y energética que te hará querer mover tu cuerpo y disfrutar de la vida.

- -

Ahora que sabes todo sobre Sugar (Ablaikan Remix) de Zubi feat. Anatu, ¿por qué no sigues adelante y lo escuchas tú mismo? Puedes encontrar la canción en varias plataformas de streaming y descarga, así como ver el video musical en YouTube. También puede seguir a Zubi, Anatu y Ablaikan en sus cuentas de redes sociales para mantenerse al día sobre sus últimas noticias y comunicados.

-

Gracias por leer este artículo, y esperamos que tengas un día dulce y picante.

-

Preguntas frecuentes

-

¿Cuál es el nombre de la canción original en la que se basa Sugar (Ablaikan Remix)?

-

El nombre de la canción original que Sugar (Ablaikan Remix) se basa en es Sugar de Zubi feat. Anatu.

-

¿Cuándo se lanzó Sugar (Ablaikan Remix)?

-

Sugar (Ablaikan Remix) fue lanzado el 29 de octubre de 2020.

-

¿Quiénes son Zubi, Anatu y Ablaikan?

-

Zubi es un cantautor nigeriano con sede en Londres, Reino Unido. Anatu es un cantautor británico con sede en Los Ángeles, Estados Unidos. Ablaikan es un productor y DJ turco con sede en Estambul.

-

¿Cuáles son algunos de los géneros musicales e influencias de Sugar (Ablaikan Remix)?

-

Algunos de los géneros musicales e influencias de Sugar (Ablaikan Remix) son deep house, oriental house, ethnic house, música electrónica, Afrobeat, R&B, soul y pop. La canción está influenciada por artistas como Fela Kuti, King Sunny Ade, Frankie Knuckles, Larry Heard, Mahmut Orhan, Burak Yeter, Daft Punk, The Chemical Brothers y más.

-

¿Cómo puedo descargar Sugar (Ablaikan Remix) gratis?

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cmo Puedo Descargar El Controlador Wifi A Mi Ordenador.md b/spaces/Benson/text-generation/Examples/Cmo Puedo Descargar El Controlador Wifi A Mi Ordenador.md deleted file mode 100644 index 59ee0c7ddf8b18dc76ef25445dee13a312f9ca2c..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Puedo Descargar El Controlador Wifi A Mi Ordenador.md +++ /dev/null @@ -1,6 +0,0 @@ - -

Qué es zfont y por qué deberías probarlo

|

Si estás aburrido con las fuentes y emojis predeterminados en tu dispositivo Android, es posible que desees revisar zfont, una aplicación gratuita que te permite cambiarlos fácil y rápidamente. zfont es un instalador de fuentes personalizado que admite muchas marcas populares como Samsung, Xiaomi, Huawei, Vivo, Oppo, Realme, Tecno e Infinix. Puede elegir entre cientos de fuentes y emojis frescos, elegantes y coloridos que harán que su dispositivo se destaque de la multitud. También puede personalizar sus propias fuentes y emojis con zfont, y compartirlos con sus amigos. Ya sea que quieras darle vida a tus mensajes, publicaciones en redes sociales o documentos, zfont puede ayudarte a expresarte mejor.

-

¿Cómo puedo descargar el controlador wifi a mi ordenador


Download: https://bltlly.com/2v6LYW



|

Cómo descargar e instalar zfont en tu dispositivo Android

|

Descargar e instalar zfont es muy fácil. Solo tienes que seguir estos sencillos pasos:

  1. Ir a la Google Play Store y buscar zfont 3 - Emoji & Font Changer o haga clic en [aquí]( 3 ) para ir directamente a la página de la aplicación.
  2. Toca el botón Instalar y espera a que la aplicación se descargue.
  3. Una vez que la aplicación esté instalada, ábrela y otorga los permisos necesarios.
  4. Verá una lista de marcas que son compatibles con zfont. Seleccione la marca de su dispositivo de la lista.
  5. También verá una lista de pestañas en la parte inferior de la pantalla. Estos son Colores, Emoji, Cool, Stylish, Custom Font, Custom Emoji, Settings y About. Puedes deslizar el dedo hacia la izquierda o hacia la derecha para cambiar entre ellos.

Aquí hay algunas capturas de pantalla de la aplicación:

[zfont app screenshot 1] [zfont app screenshot 2]

Cómo usar zfont para cambiar fuentes y emojis en tu dispositivo

|

Usar zfont para cambiar fuentes y emojis en tu dispositivo es muy simple. Solo tienes que seguir estos pasos:

-

  1. Selecciona la pestaña que corresponde a lo que quieres cambiar. Por ejemplo, si quieres cambiar de fuente, selecciona Fresco o Elegante. Si quieres cambiar de emojis, selecciona Emoji.
    -
    -
    \ No newline at end of file diff --git a/spaces/BetterAPI/BetterChat_new/src/app.d.ts b/spaces/BetterAPI/BetterChat_new/src/app.d.ts deleted file mode 100644 index 9c7e7159ba700cc7d04a098e5f268665dbd9cc2c..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat_new/src/app.d.ts +++ /dev/null @@ -1,17 +0,0 @@ -/// -/// - -// See https://kit.svelte.dev/docs/types#app -// for information about these interfaces -declare global { - namespace App { - // interface Error {} - interface Locals { - sessionId: string; - } - // interface PageData {} - // interface Platform {} - } -} - -export {}; diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/response.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/response.py deleted file mode 100644 index ba3fac9bab816bb28e05891dcec84f26d449899b..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/response.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/ -# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -import logging -from io import IOBase - -from urllib3.exceptions import ProtocolError as URLLib3ProtocolError -from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError - -from botocore import parsers -from botocore.compat import set_socket_timeout -from botocore.exceptions import ( - IncompleteReadError, - ReadTimeoutError, - ResponseStreamingError, -) - -# Keep these imported. There's pre-existing code that uses them. -from botocore import ScalarTypes # noqa -from botocore.compat import XMLParseError # noqa -from botocore.hooks import first_non_none_response # noqa - - -logger = logging.getLogger(__name__) - - -class StreamingBody(IOBase): - """Wrapper class for an http response body. - - This provides a few additional conveniences that do not exist - in the urllib3 model: - - * Set the timeout on the socket (i.e read() timeouts) - * Auto validation of content length, if the amount of bytes - we read does not match the content length, an exception - is raised. - - """ - - _DEFAULT_CHUNK_SIZE = 1024 - - def __init__(self, raw_stream, content_length): - self._raw_stream = raw_stream - self._content_length = content_length - self._amount_read = 0 - - def __del__(self): - # Extending destructor in order to preserve the underlying raw_stream. - # The ability to add custom cleanup logic introduced in Python3.4+. - # https://www.python.org/dev/peps/pep-0442/ - pass - - def set_socket_timeout(self, timeout): - """Set the timeout seconds on the socket.""" - # The problem we're trying to solve is to prevent .read() calls from - # hanging. This can happen in rare cases. What we'd like to ideally - # do is set a timeout on the .read() call so that callers can retry - # the request. - # Unfortunately, this isn't currently possible in requests. 
- # See: https://github.com/kennethreitz/requests/issues/1803 - # So what we're going to do is reach into the guts of the stream and - # grab the socket object, which we can set the timeout on. We're - # putting in a check here so in case this interface goes away, we'll - # know. - try: - set_socket_timeout(self._raw_stream, timeout) - except AttributeError: - logger.error( - "Cannot access the socket object of " - "a streaming response. It's possible " - "the interface has changed.", - exc_info=True, - ) - raise - - def readable(self): - try: - return self._raw_stream.readable() - except AttributeError: - return False - - def read(self, amt=None): - """Read at most amt bytes from the stream. - - If the amt argument is omitted, read all data. - """ - try: - chunk = self._raw_stream.read(amt) - except URLLib3ReadTimeoutError as e: - # TODO: the url will be None as urllib3 isn't setting it yet - raise ReadTimeoutError(endpoint_url=e.url, error=e) - except URLLib3ProtocolError as e: - raise ResponseStreamingError(error=e) - self._amount_read += len(chunk) - if amt is None or (not chunk and amt > 0): - # If the server sends empty contents or - # we ask to read all of the contents, then we know - # we need to verify the content length. - self._verify_content_length() - return chunk - - def readlines(self): - return self._raw_stream.readlines() - - def __iter__(self): - """Return an iterator to yield 1k chunks from the raw stream.""" - return self.iter_chunks(self._DEFAULT_CHUNK_SIZE) - - def __next__(self): - """Return the next 1k chunk from the raw stream.""" - current_chunk = self.read(self._DEFAULT_CHUNK_SIZE) - if current_chunk: - return current_chunk - raise StopIteration() - - def __enter__(self): - return self._raw_stream - - def __exit__(self, type, value, traceback): - self._raw_stream.close() - - next = __next__ - - def iter_lines(self, chunk_size=_DEFAULT_CHUNK_SIZE, keepends=False): - """Return an iterator to yield lines from the raw stream. - - This is achieved by reading chunk of bytes (of size chunk_size) at a - time from the raw stream, and then yielding lines from there. - """ - pending = b'' - for chunk in self.iter_chunks(chunk_size): - lines = (pending + chunk).splitlines(True) - for line in lines[:-1]: - yield line.splitlines(keepends)[0] - pending = lines[-1] - if pending: - yield pending.splitlines(keepends)[0] - - def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE): - """Return an iterator to yield chunks of chunk_size bytes from the raw - stream. - """ - while True: - current_chunk = self.read(chunk_size) - if current_chunk == b"": - break - yield current_chunk - - def _verify_content_length(self): - # See: https://github.com/kennethreitz/requests/issues/1855 - # Basically, our http library doesn't do this for us, so we have - # to do this ourself. - if self._content_length is not None and self._amount_read != int( - self._content_length - ): - raise IncompleteReadError( - actual_bytes=self._amount_read, - expected_bytes=int(self._content_length), - ) - - def tell(self): - return self._raw_stream.tell() - - def close(self): - """Close the underlying http response stream.""" - self._raw_stream.close() - - -def get_response(operation_model, http_response): - protocol = operation_model.metadata['protocol'] - response_dict = { - 'headers': http_response.headers, - 'status_code': http_response.status_code, - } - # TODO: Unfortunately, we have to have error logic here. - # If it looks like an error, in the streaming response case we - # need to actually grab the contents. 
- if response_dict['status_code'] >= 300: - response_dict['body'] = http_response.content - elif operation_model.has_streaming_output: - response_dict['body'] = StreamingBody( - http_response.raw, response_dict['headers'].get('content-length') - ) - else: - response_dict['body'] = http_response.content - - parser = parsers.create_parser(protocol) - return http_response, parser.parse( - response_dict, operation_model.output_shape - ) diff --git a/spaces/Blockinger/OVAChatGPT/README.md b/spaces/Blockinger/OVAChatGPT/README.md deleted file mode 100644 index 1ab7fc8e410d833b60639e6646ba76aaa3e763de..0000000000000000000000000000000000000000 --- a/spaces/Blockinger/OVAChatGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: OVAChatGPT -emoji: 😻 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/nested_resource_class_methods.py b/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/nested_resource_class_methods.py deleted file mode 100644 index c86e59fbf69f29c50ad2a3d4e947ba1c1e0205b3..0000000000000000000000000000000000000000 --- a/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/nested_resource_class_methods.py +++ /dev/null @@ -1,102 +0,0 @@ -from urllib.parse import quote_plus - -from openai import api_requestor, util - - -def nested_resource_class_methods( - resource, path=None, operations=None, resource_plural=None -): - if resource_plural is None: - resource_plural = "%ss" % resource - if path is None: - path = resource_plural - if operations is None: - raise ValueError("operations list required") - - def wrapper(cls): - def nested_resource_url(cls, id, nested_id=None): - url = "%s/%s/%s" % (cls.class_url(), quote_plus(id), quote_plus(path)) - if nested_id is not None: - url += "/%s" % quote_plus(nested_id) - return url - - resource_url_method = "%ss_url" % resource - setattr(cls, resource_url_method, classmethod(nested_resource_url)) - - def nested_resource_request( - cls, - method, - url, - api_key=None, - request_id=None, - api_version=None, - organization=None, - **params, - ): - requestor = api_requestor.APIRequestor( - api_key, api_version=api_version, organization=organization - ) - response, _, api_key = requestor.request( - method, url, params, request_id=request_id - ) - return util.convert_to_openai_object( - response, api_key, api_version, organization - ) - - resource_request_method = "%ss_request" % resource - setattr(cls, resource_request_method, classmethod(nested_resource_request)) - - for operation in operations: - if operation == "create": - - def create_nested_resource(cls, id, **params): - url = getattr(cls, resource_url_method)(id) - return getattr(cls, resource_request_method)("post", url, **params) - - create_method = "create_%s" % resource - setattr(cls, create_method, classmethod(create_nested_resource)) - - elif operation == "retrieve": - - def retrieve_nested_resource(cls, id, nested_id, **params): - url = getattr(cls, resource_url_method)(id, nested_id) - return getattr(cls, resource_request_method)("get", url, **params) - - retrieve_method = "retrieve_%s" % resource - setattr(cls, retrieve_method, classmethod(retrieve_nested_resource)) - - elif operation == "update": - - def modify_nested_resource(cls, id, nested_id, **params): - url = getattr(cls, resource_url_method)(id, nested_id) - return getattr(cls, 
resource_request_method)("post", url, **params) - - modify_method = "modify_%s" % resource - setattr(cls, modify_method, classmethod(modify_nested_resource)) - - elif operation == "delete": - - def delete_nested_resource(cls, id, nested_id, **params): - url = getattr(cls, resource_url_method)(id, nested_id) - return getattr(cls, resource_request_method)( - "delete", url, **params - ) - - delete_method = "delete_%s" % resource - setattr(cls, delete_method, classmethod(delete_nested_resource)) - - elif operation == "list": - - def list_nested_resources(cls, id, **params): - url = getattr(cls, resource_url_method)(id) - return getattr(cls, resource_request_method)("get", url, **params) - - list_method = "list_%s" % resource_plural - setattr(cls, list_method, classmethod(list_nested_resources)) - - else: - raise ValueError("Unknown operation: %s" % operation) - - return cls - - return wrapper diff --git a/spaces/Brasd99/TTS-Voice-Conversion/README.md b/spaces/Brasd99/TTS-Voice-Conversion/README.md deleted file mode 100644 index c4fc0906d90feea28160936a79f5ef4728e6c28d..0000000000000000000000000000000000000000 --- a/spaces/Brasd99/TTS-Voice-Conversion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TTS Voice Conversion -emoji: 🚀 -colorFrom: yellow -colorTo: gray -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Brofu/Joeythemonster-anything-midjourney-v-4-1/README.md b/spaces/Brofu/Joeythemonster-anything-midjourney-v-4-1/README.md deleted file mode 100644 index 1e875dfb365ada92c9746e8c437e90bf76a4c6cf..0000000000000000000000000000000000000000 --- a/spaces/Brofu/Joeythemonster-anything-midjourney-v-4-1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Joeythemonster Anything Midjourney V 4 1 -emoji: 🌍 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CForGETaass/vits-uma-genshin-honkai/app.py b/spaces/CForGETaass/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/CForGETaass/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' 
').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
    VITS语音在线合成demo\n" - "
    主要有赛马娘,原神中文,原神日语,崩坏3的音色
    " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py deleted file mode 100644 index 6d787e6788731dfb0e53647cf801c4debfbf00d9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -import numpy as np -from typing import Dict -import torch - -from detectron2.layers import ShapeSpec, batched_nms_rotated -from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated -from detectron2.utils.events import get_event_storage - -from ..box_regression import Box2BoxTransformRotated -from ..poolers import ROIPooler -from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals -from .box_head import build_box_head -from .fast_rcnn import FastRCNNOutputLayers -from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads - -logger = logging.getLogger(__name__) - -""" -Shape shorthand in this module: - - N: number of images in the minibatch - R: number of ROIs, combined over all images, in the minibatch - Ri: number of ROIs in image i - K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. - -Naming convention: - - deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box - transform (see :class:`box_regression.Box2BoxTransformRotated`). - - pred_class_logits: predicted class scores in [-inf, +inf]; use - softmax(pred_class_logits) to estimate P(class). - - gt_classes: ground-truth classification labels in [0, K], where [0, K) represent - foreground object classes and K represents the background class. 
- - pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals - to detection box predictions. - - gt_proposal_deltas: ground-truth rotated box2box transform deltas -""" - - -def fast_rcnn_inference_rotated( - boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image -): - """ - Call `fast_rcnn_inference_single_image_rotated` for all images. - - Args: - boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic - boxes for each image. Element i has shape (Ri, K * 5) if doing - class-specific regression, or (Ri, 5) if doing class-agnostic - regression, where Ri is the number of predicted objects for image i. - This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`. - scores (list[Tensor]): A list of Tensors of predicted class scores for each image. - Element i has shape (Ri, K + 1), where Ri is the number of predicted objects - for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`. - image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. - score_thresh (float): Only return detections with a confidence score exceeding this - threshold. - nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. - topk_per_image (int): The number of top scoring detections to return. Set < 0 to return - all detections. - - Returns: - instances: (list[Instances]): A list of N instances, one for each image in the batch, - that stores the topk most confidence detections. - kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates - the corresponding boxes/scores index in [0, Ri) from the input, for image i. - """ - result_per_image = [ - fast_rcnn_inference_single_image_rotated( - boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image - ) - for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) - ] - return [x[0] for x in result_per_image], [x[1] for x in result_per_image] - - -def fast_rcnn_inference_single_image_rotated( - boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image -): - """ - Single-image inference. Return rotated bounding-box detection results by thresholding - on scores and applying rotated non-maximum suppression (Rotated NMS). - - Args: - Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes - per image. - - Returns: - Same as `fast_rcnn_inference_rotated`, but for only one image. - """ - valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) - if not valid_mask.all(): - boxes = boxes[valid_mask] - scores = scores[valid_mask] - - B = 5 # box dimension - scores = scores[:, :-1] - num_bbox_reg_classes = boxes.shape[1] // B - # Convert to Boxes to use the `clip` function ... - boxes = RotatedBoxes(boxes.reshape(-1, B)) - boxes.clip(image_shape) - boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B - # Filter results based on detection scores - filter_mask = scores > score_thresh # R x K - # R' x 2. First column contains indices of the R predictions; - # Second column contains indices of classes. 
- filter_inds = filter_mask.nonzero() - if num_bbox_reg_classes == 1: - boxes = boxes[filter_inds[:, 0], 0] - else: - boxes = boxes[filter_mask] - scores = scores[filter_mask] - - # Apply per-class Rotated NMS - keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) - if topk_per_image >= 0: - keep = keep[:topk_per_image] - boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] - - result = Instances(image_shape) - result.pred_boxes = RotatedBoxes(boxes) - result.scores = scores - result.pred_classes = filter_inds[:, 1] - - return result, filter_inds[:, 0] - - -class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers): - """ - A class that stores information about outputs of a Fast R-CNN head with RotatedBoxes. - """ - - @classmethod - def from_config(cls, cfg, input_shape): - args = super().from_config(cfg, input_shape) - args["box2box_transform"] = Box2BoxTransformRotated( - weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS - ) - return args - - def inference(self, predictions, proposals): - """ - Returns: - list[Instances]: same as `fast_rcnn_inference_rotated`. - list[Tensor]: same as `fast_rcnn_inference_rotated`. - """ - boxes = self.predict_boxes(predictions, proposals) - scores = self.predict_probs(predictions, proposals) - image_shapes = [x.image_size for x in proposals] - - return fast_rcnn_inference_rotated( - boxes, - scores, - image_shapes, - self.test_score_thresh, - self.test_nms_thresh, - self.test_topk_per_image, - ) - - -@ROI_HEADS_REGISTRY.register() -class RROIHeads(StandardROIHeads): - """ - This class is used by Rotated RPN (RRPN). - For now, it just supports box head but not mask or keypoints. - """ - - def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): - super().__init__(cfg, input_shape) - assert ( - not self.mask_on and not self.keypoint_on - ), "Mask/Keypoints not supported in Rotated ROIHeads." - - def _init_box_head(self, cfg, input_shape): - # fmt: off - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE - self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES - # fmt: on - assert not self.train_on_pred_boxes, "Not Implemented!" - - # If StandardROIHeads is applied on multiple feature maps (as in FPN), - # then we share the same predictors and therefore the channel counts must be the same - in_channels = [input_shape[f].channels for f in self.in_features] - # Check all channel counts are equal - assert len(set(in_channels)) == 1, in_channels - in_channels = in_channels[0] - - assert pooler_type in ["ROIAlignRotated"] - - self.box_pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - self.box_head = build_box_head( - cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) - ) - - self.box_predictor = RotatedFastRCNNOutputLayers(cfg, self.box_head.output_shape) - - @torch.no_grad() - def label_and_sample_proposals(self, proposals, targets): - """ - Prepare some proposals to be used to train the RROI heads. - It performs box matching between `proposals` and `targets`, and assigns - training labels to the proposals. 
- It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes, - with a fraction of positives that is no larger than `self.positive_sample_fraction. - - Args: - See :meth:`StandardROIHeads.forward` - - Returns: - list[Instances]: length `N` list of `Instances`s containing the proposals - sampled for training. Each `Instances` has the following fields: - - proposal_boxes: the rotated proposal boxes - - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to - (this is only meaningful if the proposal has a label > 0; if label = 0 - then the ground-truth box is random) - - gt_classes: the ground-truth classification lable for each proposal - """ - gt_boxes = [x.gt_boxes for x in targets] - if self.proposal_append_gt: - proposals = add_ground_truth_to_proposals(gt_boxes, proposals) - - proposals_with_gt = [] - - num_fg_samples = [] - num_bg_samples = [] - for proposals_per_image, targets_per_image in zip(proposals, targets): - has_gt = len(targets_per_image) > 0 - match_quality_matrix = pairwise_iou_rotated( - targets_per_image.gt_boxes, proposals_per_image.proposal_boxes - ) - matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) - sampled_idxs, gt_classes = self._sample_proposals( - matched_idxs, matched_labels, targets_per_image.gt_classes - ) - - proposals_per_image = proposals_per_image[sampled_idxs] - proposals_per_image.gt_classes = gt_classes - - if has_gt: - sampled_targets = matched_idxs[sampled_idxs] - proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] - else: - gt_boxes = RotatedBoxes( - targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5)) - ) - proposals_per_image.gt_boxes = gt_boxes - - num_bg_samples.append((gt_classes == self.num_classes).sum().item()) - num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) - proposals_with_gt.append(proposals_per_image) - - # Log the number of fg/bg samples that are selected for training ROI heads - storage = get_event_storage() - storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) - storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) - - return proposals_with_gt diff --git a/spaces/CVPR/LIVE/pydiffvg/optimize_svg.py b/spaces/CVPR/LIVE/pydiffvg/optimize_svg.py deleted file mode 100644 index ce0097f51afca413cfd6a2dcf7ef257a443002ec..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pydiffvg/optimize_svg.py +++ /dev/null @@ -1,1607 +0,0 @@ -import json -import copy -import xml.etree.ElementTree as etree -from xml.dom import minidom -import warnings -import torch -import numpy as np -import re -import sys -import pydiffvg -import math -from collections import namedtuple -import cssutils - -class SvgOptimizationSettings: - - default_params = { - "optimize_color": True, - "color_lr": 2e-3, - "optimize_alpha": False, - "alpha_lr": 2e-3, - "optimizer": "Adam", - "transforms": { - "optimize_transforms":True, - "transform_mode":"rigid", - "translation_mult":1e-3, - "transform_lr":2e-3 - }, - "circles": { - "optimize_center": True, - "optimize_radius": True, - "shape_lr": 2e-1 - }, - "paths": { - "optimize_points": True, - "shape_lr": 2e-1 - }, - "gradients": { - "optimize_stops": True, - "stop_lr": 2e-3, - "optimize_color": True, - "color_lr": 2e-3, - "optimize_alpha": False, - "alpha_lr": 2e-3, - "optimize_location": True, - "location_lr": 2e-1 - } - } - - optims = { - "Adam": torch.optim.Adam, - "SGD": torch.optim.SGD, - "ASGD": torch.optim.ASGD, - } - - #region methods - def 
__init__(self, f=None): - self.store = {} - if f is None: - self.store["default"] = copy.deepcopy(SvgOptimizationSettings.default_params) - else: - self.store = json.load(f) - - # create default alias for root - def default_name(self, dname): - self.dname = dname - if dname not in self.store: - self.store[dname] = self.store["default"] - - def retrieve(self, node_id): - if node_id not in self.store: - return (self.store["default"], False) - else: - return (self.store[node_id], True) - - def reset_to_defaults(self, node_id): - if node_id in self.store: - del self.store[node_id] - - return self.store["default"] - - def undefault(self, node_id): - if node_id not in self.store: - self.store[node_id] = copy.deepcopy(self.store["default"]) - - return self.store[node_id] - - def override_optimizer(self, optimizer): - if optimizer is not None: - for v in self.store.values(): - v["optimizer"] = optimizer - - def global_override(self, path, value): - for store in self.store.values(): - d = store - for key in path[:-1]: - d = d[key] - - d[path[-1]] = value - - def save(self, file): - self.store["default"] = self.store[self.dname] - json.dump(self.store, file, indent="\t") - #endregion - -class OptimizableSvg: - - class TransformTools: - @staticmethod - def parse_matrix(vals): - assert(len(vals)==6) - return np.array([[vals[0],vals[2],vals[4]],[vals[1], vals[3], vals[5]],[0,0,1]]) - - @staticmethod - def parse_translate(vals): - assert(len(vals)>=1 and len(vals)<=2) - mat=np.eye(3) - mat[0,2]=vals[0] - if len(vals)>1: - mat[1,2]=vals[1] - return mat - - @staticmethod - def parse_rotate(vals): - assert (len(vals) == 1 or len(vals) == 3) - mat = np.eye(3) - rads=math.radians(vals[0]) - sint=math.sin(rads) - cost=math.cos(rads) - mat[0:2, 0:2] = np.array([[cost,-sint],[sint,cost]]) - if len(vals) > 1: - tr1=parse_translate(vals[1:3]) - tr2=parse_translate([-vals[1],-vals[2]]) - mat=tr1 @ mat @ tr2 - return mat - - @staticmethod - def parse_scale(vals): - assert (len(vals) >= 1 and len(vals) <= 2) - d=np.array([vals[0], vals[1] if len(vals)>1 else vals[0],1]) - return np.diag(d) - - @staticmethod - def parse_skewx(vals): - assert(len(vals)==1) - m=np.eye(3) - m[0,1]=vals[0] - return m - - @staticmethod - def parse_skewy(vals): - assert (len(vals) == 1) - m = np.eye(3) - m[1, 0] = vals[0] - return m - - @staticmethod - def transformPoints(pointsTensor, transform): - assert(transform is not None) - one=torch.ones((pointsTensor.shape[0],1),device=pointsTensor.device) - homo_points = torch.cat([pointsTensor, one], dim=1) - mult = transform.mm(homo_points.permute(1,0)).permute(1,0) - tfpoints=mult[:, 0:2].contiguous() - #print(torch.norm(mult[:,2]-one)) - assert(pointsTensor.shape == tfpoints.shape) - return tfpoints - - @staticmethod - def promote_numpy(M): - ret = np.eye(3) - ret[0:2, 0:2] = M - return ret - - @staticmethod - def recompose_numpy(Theta,ScaleXY,ShearX,TXY): - cost=math.cos(Theta) - sint=math.sin(Theta) - Rot=np.array([[cost, -sint],[sint, cost]]) - Scale=np.diag(ScaleXY) - Shear=np.eye(2) - Shear[0,1]=ShearX - - Translate=np.eye(3) - Translate[0:2,2]=TXY - - M=OptimizableSvg.TransformTools.promote_numpy(Rot @ Scale @ Shear) @ Translate - return M - - @staticmethod - def promote(m): - M=torch.eye(3).to(m.device) - M[0:2,0:2]=m - return M - - @staticmethod - def make_rot(Theta): - sint=Theta.sin().squeeze() - cost=Theta.cos().squeeze() - #m=torch.tensor([[cost, -sint],[sint, cost]]) - Rot=torch.stack((torch.stack((cost,-sint)),torch.stack((sint,cost)))) - return Rot - - @staticmethod - def 
make_scale(ScaleXY): - if ScaleXY.squeeze().dim()==0: - ScaleXY=ScaleXY.squeeze() - #uniform scale - return torch.diag(torch.stack([ScaleXY,ScaleXY])).to(ScaleXY.device) - else: - return torch.diag(ScaleXY).to(ScaleXY.device) - - @staticmethod - def make_shear(ShearX): - m=torch.eye(2).to(ShearX.device) - m[0,1]=ShearX - return m - - @staticmethod - def make_translate(TXY): - m=torch.eye(3).to(TXY.device) - m[0:2,2]=TXY - return m - - @staticmethod - def recompose(Theta,ScaleXY,ShearX,TXY): - Rot=OptimizableSvg.TransformTools.make_rot(Theta) - Scale=OptimizableSvg.TransformTools.make_scale(ScaleXY) - Shear=OptimizableSvg.TransformTools.make_shear(ShearX) - Translate=OptimizableSvg.TransformTools.make_translate(TXY) - - return OptimizableSvg.TransformTools.promote(Rot.mm(Scale).mm(Shear)).mm(Translate) - - TransformDecomposition=namedtuple("TransformDecomposition","theta scale shear translate") - TransformProperties=namedtuple("TransformProperties", "has_rotation has_scale has_mirror scale_uniform has_shear has_translation") - - @staticmethod - def make_named(decomp): - if not isinstance(decomp,OptimizableSvg.TransformTools.TransformDecomposition): - decomp=OptimizableSvg.TransformTools.TransformDecomposition(theta=decomp[0],scale=decomp[1],shear=decomp[2],translate=decomp[3]) - return decomp - - @staticmethod - def analyze_transform(decomp): - decomp=OptimizableSvg.TransformTools.make_named(decomp) - epsilon=1e-3 - has_rotation=abs(decomp.theta)>epsilon - has_scale=abs((abs(decomp.scale)-1)).max()>epsilon - scale_len=decomp.scale.squeeze().ndim>0 if isinstance(decomp.scale,np.ndarray) else decomp.scale.squeeze().dim() > 0 - has_mirror=scale_len and decomp.scale[0]*decomp.scale[1] < 0 - scale_uniform=not scale_len or abs(abs(decomp.scale[0])-abs(decomp.scale[1]))epsilon - has_translate=max(abs(decomp.translate[0]),abs(decomp.translate[1]))>epsilon - - return OptimizableSvg.TransformTools.TransformProperties(has_rotation=has_rotation,has_scale=has_scale,has_mirror=has_mirror,scale_uniform=scale_uniform,has_shear=has_shear,has_translation=has_translate) - - @staticmethod - def check_and_decomp(M): - decomp=OptimizableSvg.TransformTools.decompose(M) if M is not None else OptimizableSvg.TransformTools.TransformDecomposition(theta=0,scale=(1,1),shear=0,translate=(0,0)) - props=OptimizableSvg.TransformTools.analyze_transform(decomp) - return (decomp, props) - - @staticmethod - def tf_to_string(M): - tfstring = "matrix({} {} {} {} {} {})".format(M[0, 0], M[1, 0], M[0, 1], M[1, 1], M[0, 2], M[1, 2]) - return tfstring - - @staticmethod - def decomp_to_string(decomp): - decomp = OptimizableSvg.TransformTools.make_named(decomp) - ret="" - props=OptimizableSvg.TransformTools.analyze_transform(decomp) - if props.has_rotation: - ret+="rotate({}) ".format(math.degrees(decomp.theta.item())) - if props.has_scale: - if decomp.scale.dim()==0: - ret += "scale({}) ".format(decomp.scale.item()) - else: - ret+="scale({} {}) ".format(decomp.scale[0], decomp.scale[1]) - if props.has_shear: - ret+="skewX({}) ".format(decomp.shear.item()) - if props.has_translation: - ret+="translate({} {}) ".format(decomp.translate[0],decomp.translate[1]) - - return ret - - @staticmethod - def decompose(M): - m = M[0:2, 0:2] - t0=M[0:2, 2] - #get translation so that we can post-multiply with it - TXY=np.linalg.solve(m,t0) - - T=np.eye(3) - T[0:2,2]=TXY - - q, r = np.linalg.qr(m) - - ref = np.array([[1, 0], [0, np.sign(np.linalg.det(q))]]) - - Rot = np.dot(q, ref) - - ref2 = np.array([[1, 0], [0, np.sign(np.linalg.det(r))]]) - - r2 = 
np.dot(ref2, r) - - Ref = np.dot(ref, ref2) - - sc = np.diag(r2) - Scale = np.diagflat(sc) - - Shear = np.eye(2) - Shear[0, 1] = r2[0, 1] / sc[0] - #the actual shear coefficient - ShearX=r2[0, 1] / sc[0] - - if np.sum(sc) < 0: - # both scales are negative, flip this and add a 180 rotation - Rot = np.dot(Rot, -np.eye(2)) - Scale = -Scale - - Theta = math.atan2(Rot[1, 0], Rot[0, 0]) - ScaleXY = np.array([Scale[0,0],Scale[1,1]*Ref[1,1]]) - - return OptimizableSvg.TransformTools.TransformDecomposition(theta=Theta, scale=ScaleXY, shear=ShearX, translate=TXY) - - #region suboptimizers - - #optimizes color, but really any tensor that needs to stay between 0 and 1 per-entry - class ColorOptimizer: - def __init__(self,tensor,optim_type,lr): - self.tensor=tensor - self.optim=optim_type([tensor],lr=lr) - - def zero_grad(self): - self.optim.zero_grad() - - def step(self): - self.optim.step() - self.tensor.data.clamp_(min=1e-4,max=1.) - - #optimizes gradient stop positions - class StopOptimizer: - def __init__(self,stops,optim_type,lr): - self.stops=stops - self.optim=optim_type([stops],lr=lr) - - def zero_grad(self): - self.optim.zero_grad() - - def step(self): - self.optim.step() - self.stops.data.clamp_(min=0., max=1.) - self.stops.data, _ = self.stops.sort() - self.stops.data[0] = 0. - self.stops.data[-1]=1. - - #optimizes gradient: stop, positions, colors+opacities, locations - class GradientOptimizer: - def __init__(self, begin, end, offsets, stops, optim_params): - self.begin=begin.clone().detach() if begin is not None else None - self.end=end.clone().detach() if end is not None else None - self.offsets=offsets.clone().detach() if offsets is not None else None - self.stop_colors=stops[:,0:3].clone().detach() if stops is not None else None - self.stop_alphas=stops[:,3].clone().detach() if stops is not None else None - self.optimizers=[] - - if optim_params["gradients"]["optimize_stops"] and self.offsets is not None: - self.offsets.requires_grad_(True) - self.optimizers.append(OptimizableSvg.StopOptimizer(self.offsets,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["stop_lr"])) - if optim_params["gradients"]["optimize_color"] and self.stop_colors is not None: - self.stop_colors.requires_grad_(True) - self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_colors,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["color_lr"])) - if optim_params["gradients"]["optimize_alpha"] and self.stop_alphas is not None: - self.stop_alphas.requires_grad_(True) - self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_alphas,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["alpha_lr"])) - if optim_params["gradients"]["optimize_location"] and self.begin is not None and self.end is not None: - self.begin.requires_grad_(True) - self.end.requires_grad_(True) - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.begin,self.end],lr=optim_params["gradients"]["location_lr"])) - - - def get_vals(self): - return self.begin, self.end, self.offsets, torch.cat((self.stop_colors,self.stop_alphas.unsqueeze(1)),1) if self.stop_colors is not None and self.stop_alphas is not None else None - - def zero_grad(self): - for optim in self.optimizers: - optim.zero_grad() - - def step(self): - for optim in self.optimizers: - optim.step() - - class TransformOptimizer: - def __init__(self,transform,optim_params): - self.transform=transform - 
self.optimizes=optim_params["transforms"]["optimize_transforms"] and transform is not None - self.params=copy.deepcopy(optim_params) - self.transform_mode=optim_params["transforms"]["transform_mode"] - - if self.optimizes: - optimvars=[] - self.residual=None - lr=optim_params["transforms"]["transform_lr"] - tmult=optim_params["transforms"]["translation_mult"] - decomp,props=OptimizableSvg.TransformTools.check_and_decomp(transform.cpu().numpy()) - if self.transform_mode=="move": - #only translation and rotation should be set - if props.has_scale or props.has_shear or props.has_mirror: - print("Warning: set to optimize move only, but input transform has residual scale or shear") - self.residual=self.transform.clone().detach().requires_grad_(False) - self.Theta=torch.tensor(0,dtype=torch.float32,requires_grad=True,device=transform.device) - self.translation=torch.tensor([0, 0],dtype=torch.float32,requires_grad=True,device=transform.device) - else: - self.residual=None - self.Theta=torch.tensor(decomp.theta,dtype=torch.float32,requires_grad=True,device=transform.device) - self.translation=torch.tensor(decomp.translate,dtype=torch.float32,requires_grad=True,device=transform.device) - optimvars+=[{'params':x,'lr':lr} for x in [self.Theta]]+[{'params':self.translation,'lr':lr*tmult}] - elif self.transform_mode=="rigid": - #only translation, rotation, and uniform scale should be set - if props.has_shear or props.has_mirror or not props.scale_uniform: - print("Warning: set to optimize rigid transform only, but input transform has residual shear, mirror or non-uniform scale") - self.residual = self.transform.clone().detach().requires_grad_(False) - self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device) - self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device) - else: - self.residual = None - self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) - self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device) - optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}] - elif self.transform_mode=="similarity": - if props.has_shear or not props.scale_uniform: - print("Warning: set to optimize rigid transform only, but input transform has residual shear or non-uniform scale") - self.residual = self.transform.clone().detach().requires_grad_(False) - self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device) - self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale_sign=torch.tensor(1,dtype=torch.float32,requires_grad=False,device=transform.device) - else: - self.residual = None - self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) - self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale_sign = 
torch.tensor(np.sign(decomp.scale[0]*decomp.scale[1]), dtype=torch.float32, requires_grad=False,device=transform.device) - optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}] - elif self.transform_mode=="affine": - self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) - self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) - self.scale = torch.tensor(decomp.scale, dtype=torch.float32, requires_grad=True,device=transform.device) - self.shear = torch.tensor(decomp.shear, dtype=torch.float32, requires_grad=True,device=transform.device) - optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale, self.shear]]+[{'params':self.translation,'lr':lr*tmult}] - else: - raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode)) - self.optimizer=SvgOptimizationSettings.optims[optim_params["optimizer"]](optimvars) - - def get_transform(self): - if not self.optimizes: - return self.transform - else: - if self.transform_mode == "move": - composed=OptimizableSvg.TransformTools.recompose(self.Theta,torch.tensor([1.],device=self.Theta.device),torch.tensor(0.,device=self.Theta.device),self.translation) - return self.residual.mm(composed) if self.residual is not None else composed - elif self.transform_mode == "rigid": - composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, torch.tensor(0.,device=self.Theta.device), - self.translation) - return self.residual.mm(composed) if self.residual is not None else composed - elif self.transform_mode == "similarity": - composed=OptimizableSvg.TransformTools.recompose(self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.,device=self.Theta.device),self.translation) - return self.residual.mm(composed) if self.residual is not None else composed - elif self.transform_mode == "affine": - composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, self.shear, self.translation) - return composed - else: - raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode)) - - def tfToString(self): - if self.transform is None: - return None - elif not self.optimizes: - return OptimizableSvg.TransformTools.tf_to_string(self.transform) - else: - if self.transform_mode == "move": - str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta,torch.tensor([1.]),torch.tensor(0.),self.translation)) - return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str - elif self.transform_mode == "rigid": - str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, torch.tensor(0.), - self.translation)) - return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str - elif self.transform_mode == "similarity": - str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.),self.translation)) - return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str - elif self.transform_mode == "affine": - str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, self.shear, self.translation)) - return composed - - def zero_grad(self): - if self.optimizes: - self.optimizer.zero_grad() - - def step(self): - if self.optimizes: - self.optimizer.step() - - #endregion - - 
#region Nodes - class SvgNode: - def __init__(self,id,transform,appearance,settings): - self.id=id - self.children=[] - self.optimizers=[] - self.device = settings.device - self.transform=torch.tensor(transform,dtype=torch.float32,device=self.device) if transform is not None else None - self.transform_optim=OptimizableSvg.TransformOptimizer(self.transform,settings.retrieve(self.id)[0]) - self.optimizers.append(self.transform_optim) - self.proc_appearance(appearance,settings.retrieve(self.id)[0]) - - def tftostring(self): - return self.transform_optim.tfToString() - - def appearanceToString(self): - appstring="" - for key,value in self.appearance.items(): - if key in ["fill", "stroke"]: - #a paint-type value - if value[0] == "none": - appstring+="{}:none;".format(key) - elif value[0] == "solid": - appstring += "{}:{};".format(key,OptimizableSvg.rgb_to_string(value[1])) - elif value[0] == "url": - appstring += "{}:url(#{});".format(key,value[1].id) - #appstring += "{}:{};".format(key,"#ff00ff") - elif key in ["opacity", "fill-opacity", "stroke-opacity", "stroke-width", "fill-rule"]: - appstring+="{}:{};".format(key,value) - else: - raise ValueError("Don't know how to write appearance parameter '{}'".format(key)) - return appstring - - - def write_xml_common_attrib(self,node,tfname="transform"): - if self.transform is not None: - node.set(tfname,self.tftostring()) - if len(self.appearance)>0: - node.set('style',self.appearanceToString()) - if self.id is not None: - node.set('id',self.id) - - - def proc_appearance(self,appearance,optim_params): - self.appearance=appearance - for key, value in appearance.items(): - if key == "fill" or key == "stroke": - if optim_params["optimize_color"] and value[0]=="solid": - value[1].requires_grad_(True) - self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1],SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["color_lr"])) - elif key == "fill-opacity" or key == "stroke-opacity" or key == "opacity": - if optim_params["optimize_alpha"]: - value[1].requires_grad_(True) - self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1], optim_params["optimizer"], - optim_params["alpha_lr"])) - elif key == "fill-rule" or key == "stroke-width": - pass - else: - raise RuntimeError("Unrecognized appearance key '{}'".format(key)) - - def prop_transform(self,intform): - return intform.matmul(self.transform_optim.get_transform()) if self.transform is not None else intform - - def prop_appearance(self,inappearance): - outappearance=copy.copy(inappearance) - for key,value in self.appearance.items(): - if key == "fill": - #gets replaced - outappearance[key]=value - elif key == "fill-opacity": - #gets multiplied - outappearance[key] = outappearance[key]*value - elif key == "fill-rule": - #gets replaced - outappearance[key] = value - elif key =="opacity": - # gets multiplied - outappearance[key] = outappearance[key]*value - elif key == "stroke": - # gets replaced - outappearance[key] = value - elif key == "stroke-opacity": - # gets multiplied - outappearance[key] = outappearance[key]*value - elif key =="stroke-width": - # gets replaced - outappearance[key] = value - else: - raise RuntimeError("Unrecognized appearance key '{}'".format(key)) - return outappearance - - def zero_grad(self): - for optim in self.optimizers: - optim.zero_grad() - for child in self.children: - child.zero_grad() - - def step(self): - for optim in self.optimizers: - optim.step() - for child in self.children: - child.step() - - def get_type(self): - return "Generic node" - - 
def is_shape(self): - return False - - def build_scene(self,shapes,shape_groups,transform,appearance): - raise NotImplementedError("Abstract SvgNode cannot recurse") - - class GroupNode(SvgNode): - def __init__(self, id, transform, appearance,settings): - super().__init__(id, transform, appearance,settings) - - def get_type(self): - return "Group node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - outtf=self.prop_transform(transform) - outapp=self.prop_appearance(appearance) - for child in self.children: - child.build_scene(shapes,shape_groups,outtf,outapp) - - def write_xml(self, parent): - elm=etree.SubElement(parent,"g") - self.write_xml_common_attrib(elm) - - for child in self.children: - child.write_xml(elm) - - class RootNode(SvgNode): - def __init__(self, id, transform, appearance,settings): - super().__init__(id, transform, appearance,settings) - - def write_xml(self,document): - elm=etree.Element('svg') - self.write_xml_common_attrib(elm) - elm.set("version","2.0") - elm.set("width",str(document.canvas[0])) - elm.set("height", str(document.canvas[1])) - elm.set("xmlns","http://www.w3.org/2000/svg") - elm.set("xmlns:xlink","http://www.w3.org/1999/xlink") - #write definitions before we write any children - document.write_defs(elm) - - #write the children - for child in self.children: - child.write_xml(elm) - - return elm - - def get_type(self): - return "Root node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - outtf = self.prop_transform(transform).to(self.device) - for child in self.children: - child.build_scene(shapes,shape_groups,outtf,appearance) - - @staticmethod - def get_default_appearance(device): - default_appearance = {"fill": ("solid", torch.tensor([0., 0., 0.],device=device)), - "fill-opacity": torch.tensor([1.],device=device), - "fill-rule": "nonzero", - "opacity": torch.tensor([1.],device=device), - "stroke": ("none", None), - "stroke-opacity": torch.tensor([1.],device=device), - "stroke-width": torch.tensor([0.],device=device)} - return default_appearance - - @staticmethod - def get_default_transform(): - return torch.eye(3) - - - - class ShapeNode(SvgNode): - def __init__(self, id, transform, appearance,settings): - super().__init__(id, transform, appearance,settings) - - def get_type(self): - return "Generic shape node" - - def is_shape(self): - return True - - def construct_paint(self,value,combined_opacity,transform): - if value[0] == "none": - return None - elif value[0] == "solid": - return torch.cat([value[1],combined_opacity]).to(self.device) - elif value[0] == "url": - #get the gradient object from this node - return value[1].getGrad(combined_opacity,transform) - else: - raise ValueError("Unknown paint value type '{}'".format(value[0])) - - def make_shape_group(self,appearance,transform,num_shapes,num_subobjects): - fill=self.construct_paint(appearance["fill"],appearance["opacity"]*appearance["fill-opacity"],transform) - stroke=self.construct_paint(appearance["stroke"],appearance["opacity"]*appearance["stroke-opacity"],transform) - sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + num_subobjects)), - fill_color=fill, - use_even_odd_rule=appearance["fill-rule"]=="evenodd", - stroke_color=stroke, - shape_to_canvas=transform, - id=self.id) - return sg - - class PathNode(ShapeNode): - def __init__(self, id, transform, appearance,settings, paths): - super().__init__(id, transform, appearance,settings) - self.proc_paths(paths,settings.retrieve(self.id)[0]) - - def 
proc_paths(self,paths,optim_params): - self.paths=paths - if optim_params["paths"]["optimize_points"]: - ptlist=[] - for path in paths: - ptlist.append(path.points.requires_grad_(True)) - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]](ptlist,lr=optim_params["paths"]["shape_lr"])) - - def get_type(self): - return "Path node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - applytf=self.prop_transform(transform) - applyapp = self.prop_appearance(appearance) - sg=self.make_shape_group(applyapp,applytf,len(shapes),len(self.paths)) - for path in self.paths: - disp_path=pydiffvg.Path(path.num_control_points,path.points,path.is_closed,applyapp["stroke-width"],path.id) - shapes.append(disp_path) - shape_groups.append(sg) - - def path_to_string(self,path): - path_string = "M {},{} ".format(path.points[0][0].item(), path.points[0][1].item()) - idx = 1 - numpoints = path.points.shape[0] - for type in path.num_control_points: - toproc = type + 1 - if type == 0: - # add line - path_string += "L " - elif type == 1: - # add quadric - path_string += "Q " - elif type == 2: - # add cubic - path_string += "C " - while toproc > 0: - path_string += "{},{} ".format(path.points[idx % numpoints][0].item(), - path.points[idx % numpoints][1].item()) - idx += 1 - toproc -= 1 - if path.is_closed: - path_string += "Z " - - return path_string - - def paths_string(self): - pstr="" - for path in self.paths: - pstr+=self.path_to_string(path) - return pstr - - def write_xml(self, parent): - elm = etree.SubElement(parent, "path") - self.write_xml_common_attrib(elm) - elm.set("d",self.paths_string()) - - for child in self.children: - child.write_xml(elm) - - class RectNode(ShapeNode): - def __init__(self, id, transform, appearance,settings, rect): - super().__init__(id, transform, appearance,settings) - self.rect=torch.tensor(rect,dtype=torch.float,device=settings.device) - optim_params=settings.retrieve(self.id)[0] - #borrowing path settings for this - if optim_params["paths"]["optimize_points"]: - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.rect],lr=optim_params["paths"]["shape_lr"])) - - def get_type(self): - return "Rect node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - applytf=self.prop_transform(transform) - applyapp = self.prop_appearance(appearance) - sg=self.make_shape_group(applyapp,applytf,len(shapes),1) - shapes.append(pydiffvg.Rect(self.rect[0:2],self.rect[0:2]+self.rect[2:4],applyapp["stroke-width"],self.id)) - shape_groups.append(sg) - - def write_xml(self, parent): - elm = etree.SubElement(parent, "rect") - self.write_xml_common_attrib(elm) - elm.set("x",str(self.rect[0])) - elm.set("y", str(self.rect[1])) - elm.set("width", str(self.rect[2])) - elm.set("height", str(self.rect[3])) - - for child in self.children: - child.write_xml(elm) - - class CircleNode(ShapeNode): - def __init__(self, id, transform, appearance,settings, rect): - super().__init__(id, transform, appearance,settings) - self.circle=torch.tensor(rect,dtype=torch.float,device=settings.device) - optim_params=settings.retrieve(self.id)[0] - #borrowing path settings for this - if optim_params["paths"]["optimize_points"]: - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.circle],lr=optim_params["paths"]["shape_lr"])) - - def get_type(self): - return "Circle node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - applytf=self.prop_transform(transform) - applyapp = 
self.prop_appearance(appearance) - sg=self.make_shape_group(applyapp,applytf,len(shapes),1) - shapes.append(pydiffvg.Circle(self.circle[2],self.circle[0:2],applyapp["stroke-width"],self.id)) - shape_groups.append(sg) - - def write_xml(self, parent): - elm = etree.SubElement(parent, "circle") - self.write_xml_common_attrib(elm) - elm.set("cx",str(self.circle[0])) - elm.set("cy", str(self.circle[1])) - elm.set("r", str(self.circle[2])) - - for child in self.children: - child.write_xml(elm) - - - class EllipseNode(ShapeNode): - def __init__(self, id, transform, appearance,settings, ellipse): - super().__init__(id, transform, appearance,settings) - self.ellipse=torch.tensor(ellipse,dtype=torch.float,device=settings.device) - optim_params=settings.retrieve(self.id)[0] - #borrowing path settings for this - if optim_params["paths"]["optimize_points"]: - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.ellipse],lr=optim_params["paths"]["shape_lr"])) - - def get_type(self): - return "Ellipse node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - applytf=self.prop_transform(transform) - applyapp = self.prop_appearance(appearance) - sg=self.make_shape_group(applyapp,applytf,len(shapes),1) - shapes.append(pydiffvg.Ellipse(self.ellipse[2:4],self.ellipse[0:2],applyapp["stroke-width"],self.id)) - shape_groups.append(sg) - - def write_xml(self, parent): - elm = etree.SubElement(parent, "ellipse") - self.write_xml_common_attrib(elm) - elm.set("cx", str(self.ellipse[0])) - elm.set("cy", str(self.ellipse[1])) - elm.set("rx", str(self.ellipse[2])) - elm.set("ry", str(self.ellipse[3])) - - for child in self.children: - child.write_xml(elm) - - class PolygonNode(ShapeNode): - def __init__(self, id, transform, appearance,settings, points): - super().__init__(id, transform, appearance,settings) - self.points=points - optim_params=settings.retrieve(self.id)[0] - #borrowing path settings for this - if optim_params["paths"]["optimize_points"]: - self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.points],lr=optim_params["paths"]["shape_lr"])) - - def get_type(self): - return "Polygon node" - - def build_scene(self,shapes,shape_groups,transform,appearance): - applytf=self.prop_transform(transform) - applyapp = self.prop_appearance(appearance) - sg=self.make_shape_group(applyapp,applytf,len(shapes),1) - shapes.append(pydiffvg.Polygon(self.points,True,applyapp["stroke-width"],self.id)) - shape_groups.append(sg) - - def point_string(self): - ret="" - for i in range(self.points.shape[0]): - pt=self.points[i,:] - #assert pt.shape == (1,2) - ret+= str(pt[0])+","+str(pt[1])+" " - return ret - - def write_xml(self, parent): - elm = etree.SubElement(parent, "polygon") - self.write_xml_common_attrib(elm) - elm.set("points",self.point_string()) - - for child in self.children: - child.write_xml(elm) - - class GradientNode(SvgNode): - def __init__(self, id, transform,settings,begin,end,offsets,stops,href): - super().__init__(id, transform, {},settings) - self.optim=OptimizableSvg.GradientOptimizer(begin, end, offsets, stops, settings.retrieve(id)[0]) - self.optimizers.append(self.optim) - self.href=href - - def is_ref(self): - return self.href is not None - - def get_type(self): - return "Gradient node" - - def get_stops(self): - _, _, offsets, stops=self.optim.get_vals() - return offsets, stops - - def get_points(self): - begin, end, _, _ =self.optim.get_vals() - return begin, end - - def write_xml(self, parent): - elm = 
etree.SubElement(parent, "linearGradient") - self.write_xml_common_attrib(elm,tfname="gradientTransform") - - begin, end, offsets, stops = self.optim.get_vals() - - if self.href is None: - #we have stops - for idx, offset in enumerate(offsets): - stop=etree.SubElement(elm,"stop") - stop.set("offset",str(offset.item())) - stop.set("stop-color",OptimizableSvg.rgb_to_string(stops[idx,0:3])) - stop.set("stop-opacity",str(stops[idx,3].item())) - else: - elm.set('xlink:href', "#{}".format(self.href.id)) - - if begin is not None and end is not None: - #no stops - elm.set('x1', str(begin[0].item())) - elm.set('y1', str(begin[1].item())) - elm.set('x2', str(end[0].item())) - elm.set('y2', str(end[1].item())) - - # magic value to make this work - elm.set("gradientUnits", "userSpaceOnUse") - - for child in self.children: - child.write_xml(elm) - - def getGrad(self,combined_opacity,transform): - if self.is_ref(): - offsets, stops=self.href.get_stops() - else: - offsets, stops=self.get_stops() - - stops=stops.clone() - stops[:,3]*=combined_opacity - - begin,end = self.get_points() - - applytf=self.prop_transform(transform) - begin=OptimizableSvg.TransformTools.transformPoints(begin.unsqueeze(0),applytf).squeeze() - end = OptimizableSvg.TransformTools.transformPoints(end.unsqueeze(0), applytf).squeeze() - - return pydiffvg.LinearGradient(begin, end, offsets, stops) - #endregion - - def __init__(self, filename, settings=SvgOptimizationSettings(),optimize_background=False, verbose=False, device=torch.device("cpu")): - self.settings=settings - self.verbose=verbose - self.device=device - self.settings.device=device - - tree = etree.parse(filename) - root = tree.getroot() - - #in case we need global optimization - self.optimizers=[] - self.background=torch.tensor([1.,1.,1.],dtype=torch.float32,requires_grad=optimize_background,device=self.device) - - if optimize_background: - p=settings.retrieve("default")[0] - self.optimizers.append(OptimizableSvg.ColorOptimizer(self.background,SvgOptimizationSettings.optims[p["optimizer"]],p["color_lr"])) - - self.defs={} - - self.depth=0 - - self.dirty=True - self.scene=None - - self.parseRoot(root) - - recognised_shapes=["path","circle","rect","ellipse","polygon"] - - #region core functionality - def build_scene(self): - if self.dirty: - shape_groups=[] - shapes=[] - self.root.build_scene(shapes,shape_groups,OptimizableSvg.RootNode.get_default_transform().to(self.device),OptimizableSvg.RootNode.get_default_appearance(self.device)) - self.scene=(self.canvas[0],self.canvas[1],shapes,shape_groups) - self.dirty=False - return self.scene - - def zero_grad(self): - self.root.zero_grad() - for optim in self.optimizers: - optim.zero_grad() - for item in self.defs.values(): - if issubclass(item.__class__,OptimizableSvg.SvgNode): - item.zero_grad() - - def render(self,scale=None,seed=0): - #render at native resolution - scene = self.build_scene() - scene_args = pydiffvg.RenderFunction.serialize_scene(*scene) - render = pydiffvg.RenderFunction.apply - out_size=(scene[0],scene[1]) if scale is None else (int(scene[0]*scale),int(scene[1]*scale)) - img = render(out_size[0], # width - out_size[1], # height - 2, # num_samples_x - 2, # num_samples_y - seed, # seed - None, # background_image - *scene_args) - return img - - def step(self): - self.dirty=True - self.root.step() - for optim in self.optimizers: - optim.step() - for item in self.defs.values(): - if issubclass(item.__class__, OptimizableSvg.SvgNode): - item.step() - #endregion - - #region reporting - - def offset_str(self,s): - 
return ("\t"*self.depth)+s - - def reportSkippedAttribs(self, node, non_skipped=[]): - skipped=set([k for k in node.attrib.keys() if not OptimizableSvg.is_namespace(k)])-set(non_skipped) - if len(skipped)>0: - tag=OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag),node.attrib["id"]) - print(self.offset_str("Warning: Skipping the following attributes of node '{}': {}".format(tag,", ".join(["'{}'".format(atr) for atr in skipped])))) - - def reportSkippedChildren(self,node,skipped): - skipped_names=["{}#{}".format(elm.tag,elm.attrib["id"]) if "id" in elm.attrib else elm.tag for elm in skipped] - if len(skipped)>0: - tag = OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag), - node.attrib["id"]) - print(self.offset_str("Warning: Skipping the following children of node '{}': {}".format(tag,", ".join(["'{}'".format(name) for name in skipped_names])))) - - #endregion - - #region parsing - @staticmethod - def remove_namespace(s): - """ - {...} ... -> ... - """ - return re.sub('{.*}', '', s) - - @staticmethod - def is_namespace(s): - return re.match('{.*}', s) is not None - - @staticmethod - def parseTransform(node): - if "transform" not in node.attrib and "gradientTransform" not in node.attrib: - return None - - tf_string=node.attrib["transform"] if "transform" in node.attrib else node.attrib["gradientTransform"] - tforms=tf_string.split(")")[:-1] - mat=np.eye(3) - for tform in tforms: - type = tform.split("(")[0] - args = [float(val) for val in re.split("[, ]+",tform.split("(")[1])] - if type == "matrix": - mat=mat @ OptimizableSvg.TransformTools.parse_matrix(args) - elif type == "translate": - mat = mat @ OptimizableSvg.TransformTools.parse_translate(args) - elif type == "rotate": - mat = mat @ OptimizableSvg.TransformTools.parse_rotate(args) - elif type == "scale": - mat = mat @ OptimizableSvg.TransformTools.parse_scale(args) - elif type == "skewX": - mat = mat @ OptimizableSvg.TransformTools.parse_skewx(args) - elif type == "skewY": - mat = mat @ OptimizableSvg.TransformTools.parse_skewy(args) - else: - raise ValueError("Unknown transform type '{}'".format(type)) - return mat - - #dictionary that defines what constant do we need to multiply different units to get the value in pixels - #gleaned from the CSS definition - unit_dict = {"px":1, - "mm":4, - "cm":40, - "in":25.4*4, - "pt":25.4*4/72, - "pc":25.4*4/6 - } - - @staticmethod - def parseLength(s): - #length is a number followed possibly by a unit definition - #we assume that default unit is the pixel (px) equal to 0.25mm - #last two characters might be unit - val=None - for i in range(len(s)): - try: - val=float(s[:len(s)-i]) - unit=s[len(s)-i:] - break - except ValueError: - continue - if len(unit)>0 and unit not in OptimizableSvg.unit_dict: - raise ValueError("Unknown or unsupported unit '{}' encountered while parsing".format(unit)) - if unit != "": - val*=OptimizableSvg.unit_dict[unit] - return val - - @staticmethod - def parseOpacity(s): - is_percent=s.endswith("%") - s=s.rstrip("%") - val=float(s) - if is_percent: - val=val/100 - return np.clip(val,0.,1.) 
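As a quick illustration of parseLength and parseOpacity above: lengths are reduced to 0.25 mm "pixels" through the unit table, and opacities are clamped to [0, 1]. A minimal standalone sketch of the same unit conversion (hypothetical helper, not part of the original file):

    # Hypothetical re-statement of the unit handling in parseLength, for illustration only.
    unit_to_px = {"px": 1, "mm": 4, "cm": 40, "in": 25.4 * 4, "pt": 25.4 * 4 / 72, "pc": 25.4 * 4 / 6}

    def to_px(length: str) -> float:
        # peel off a trailing unit, if any, and scale the numeric part to pixels
        for unit, factor in unit_to_px.items():
            if length.endswith(unit):
                return float(length[:-len(unit)]) * factor
        return float(length)  # bare numbers are already pixels

    assert to_px("10") == 10.0                  # plain pixels
    assert to_px("2mm") == 8.0                  # 1 mm == 4 px
    assert abs(to_px("72pt") - 101.6) < 1e-9    # 72 pt == 1 in == 101.6 px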
- - @staticmethod - def parse_color(s): - """ - Hex to tuple - """ - if s[0] != '#': - raise ValueError("Color argument `{}` not supported".format(s)) - s = s.lstrip('#') - if len(s)==6: - rgb = tuple(int(s[i:i + 2], 16) for i in (0, 2, 4)) - return torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]) - elif len(s)==3: - rgb = tuple((int(s[i:i + 1], 16)) for i in (0, 1, 2)) - return torch.tensor([rgb[0] / 15.0, rgb[1] / 15.0, rgb[2] / 15.0]) - else: - raise ValueError("Color argument `{}` not supported".format(s)) - # sRGB to RGB - # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) - - - @staticmethod - def rgb_to_string(val): - byte_rgb=(val.clone().detach()*255).type(torch.int) - byte_rgb.clamp_(min=0,max=255) - s="#{:02x}{:02x}{:02x}".format(*byte_rgb) - return s - - #parses a "paint" string for use in fill and stroke definitions - @staticmethod - def parsePaint(paintStr,defs,device): - paintStr=paintStr.strip() - if paintStr=="none": - return ("none", None) - elif paintStr[0]=="#": - return ("solid",OptimizableSvg.parse_color(paintStr).to(device)) - elif paintStr.startswith("url"): - url=paintStr.lstrip("url(").rstrip(")").strip("\'\"").lstrip("#") - if url not in defs: - raise ValueError("Paint-type attribute referencing an unknown object with ID '#{}'".format(url)) - return ("url",defs[url]) - else: - raise ValueError("Unrecognized paint string: '{}'".format(paintStr)) - - appearance_keys=["fill","fill-opacity","fill-rule","opacity","stroke","stroke-opacity","stroke-width"] - - @staticmethod - def parseAppearance(node, defs, device): - ret={} - parse_keys = OptimizableSvg.appearance_keys - local_dict={key:value for key,value in node.attrib.items() if key in parse_keys} - css_dict={} - style_dict={} - appearance_dict={} - if "class" in node.attrib: - cls=node.attrib["class"] - if "."+cls in defs: - css_string=defs["."+cls] - css_dict={item.split(":")[0]:item.split(":")[1] for item in css_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} - if "style" in node.attrib: - style_string=node.attrib["style"] - style_dict={item.split(":")[0]:item.split(":")[1] for item in style_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} - appearance_dict.update(css_dict) - appearance_dict.update(style_dict) - appearance_dict.update(local_dict) - for key,value in appearance_dict.items(): - if key=="fill": - ret[key]=OptimizableSvg.parsePaint(value,defs,device) - elif key == "fill-opacity": - ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) - elif key == "fill-rule": - ret[key]=value - elif key == "opacity": - ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) - elif key == "stroke": - ret[key]=OptimizableSvg.parsePaint(value,defs,device) - elif key == "stroke-opacity": - ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) - elif key == "stroke-width": - ret[key]=torch.tensor(OptimizableSvg.parseLength(value),device=device) - else: - raise ValueError("Error while parsing appearance attributes: key '{}' should not be here".format(key)) - - return ret - - def parseRoot(self,root): - if self.verbose: - print(self.offset_str("Parsing root")) - self.depth += 1 - - # get document canvas dimensions - self.parseViewport(root) - canvmax=np.max(self.canvas) - self.settings.global_override(["transforms","translation_mult"],canvmax) - id=root.attrib["id"] if "id" in root.attrib else None - - transform=OptimizableSvg.parseTransform(root) - 
appearance=OptimizableSvg.parseAppearance(root,self.defs,self.device) - - version=root.attrib["version"] if "version" in root.attrib else "" - if version != "2.0": - print(self.offset_str("Warning: Version {} is not 2.0, strange things may happen".format(version))) - - self.root=OptimizableSvg.RootNode(id,transform,appearance,self.settings) - - if self.verbose: - self.reportSkippedAttribs(root, ["width", "height", "id", "transform","version", "style"]+OptimizableSvg.appearance_keys) - - #go through the root children and parse them appropriately - skipped=[] - for child in root: - if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: - self.parseShape(child,self.root) - elif OptimizableSvg.remove_namespace(child.tag) == "defs": - self.parseDefs(child) - elif OptimizableSvg.remove_namespace(child.tag) == "style": - self.parseStyle(child) - elif OptimizableSvg.remove_namespace(child.tag) == "g": - self.parseGroup(child,self.root) - else: - skipped.append(child) - - if self.verbose: - self.reportSkippedChildren(root,skipped) - - self.depth-=1 - - def parseShape(self,shape,parent): - tag=OptimizableSvg.remove_namespace(shape.tag) - if self.verbose: - print(self.offset_str("Parsing {}#{}".format(tag,shape.attrib["id"] if "id" in shape.attrib else ""))) - - self.depth+=1 - if tag == "path": - self.parsePath(shape,parent) - elif tag == "circle": - self.parseCircle(shape,parent) - elif tag == "rect": - self.parseRect(shape,parent) - elif tag == "ellipse": - self.parseEllipse(shape,parent) - elif tag == "polygon": - self.parsePolygon(shape,parent) - else: - raise ValueError("Encountered unknown shape type '{}'".format(tag)) - self.depth -= 1 - - def parsePath(self,shape,parent): - path_string=shape.attrib['d'] - name = '' - if 'id' in shape.attrib: - name = shape.attrib['id'] - paths = pydiffvg.from_svg_path(path_string) - for idx, path in enumerate(paths): - path.stroke_width = torch.tensor([0.],device=self.device) - path.num_control_points=path.num_control_points.to(self.device) - path.points=path.points.to(self.device) - path.source_id = name - path.id = "{}-{}".format(name,idx) if len(paths)>1 else name - transform = OptimizableSvg.parseTransform(shape) - appearance = OptimizableSvg.parseAppearance(shape,self.defs,self.device) - node=OptimizableSvg.PathNode(name,transform,appearance,self.settings,paths) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(shape, ["id","d","transform","style"]+OptimizableSvg.appearance_keys) - self.reportSkippedChildren(shape,list(shape)) - - def parseEllipse(self, shape, parent): - cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. - cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. 
- rx = float(shape.attrib["rx"]) - ry = float(shape.attrib["ry"]) - name = '' - if 'id' in shape.attrib: - name = shape.attrib['id'] - transform = OptimizableSvg.parseTransform(shape) - appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) - node = OptimizableSvg.EllipseNode(name, transform, appearance, self.settings, (cx, cy, rx, ry)) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", - "style"] + OptimizableSvg.appearance_keys) - self.reportSkippedChildren(shape, list(shape)) - - def parsePolygon(self, shape, parent): - points_string = shape.attrib['points'] - name = '' - points=[] - for point_string in points_string.split(" "): - if len(point_string) == 0: - continue - coord_strings=point_string.split(",") - assert len(coord_strings)==2 - points.append([float(coord_strings[0]),float(coord_strings[1])]) - points=torch.tensor(points,dtype=torch.float,device=self.device) - if 'id' in shape.attrib: - name = shape.attrib['id'] - transform = OptimizableSvg.parseTransform(shape) - appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) - node = OptimizableSvg.PolygonNode(name, transform, appearance, self.settings, points) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(shape, ["id", "points", "transform", "style"] + OptimizableSvg.appearance_keys) - self.reportSkippedChildren(shape, list(shape)) - - def parseCircle(self,shape,parent): - cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. - cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. - r = float(shape.attrib["r"]) - name = '' - if 'id' in shape.attrib: - name = shape.attrib['id'] - transform = OptimizableSvg.parseTransform(shape) - appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) - node = OptimizableSvg.CircleNode(name, transform, appearance, self.settings, (cx, cy, r)) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", - "style"] + OptimizableSvg.appearance_keys) - self.reportSkippedChildren(shape, list(shape)) - - def parseRect(self,shape,parent): - x = float(shape.attrib["x"]) if "x" in shape.attrib else 0. - y = float(shape.attrib["y"]) if "y" in shape.attrib else 0. 
- width = float(shape.attrib["width"]) - height = float(shape.attrib["height"]) - name = '' - if 'id' in shape.attrib: - name = shape.attrib['id'] - transform = OptimizableSvg.parseTransform(shape) - appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) - node = OptimizableSvg.RectNode(name, transform, appearance, self.settings, (x,y,width,height)) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(shape, ["id", "x", "y", "width", "height", "transform", "style"] + OptimizableSvg.appearance_keys) - self.reportSkippedChildren(shape, list(shape)) - - def parseGroup(self,group,parent): - tag = OptimizableSvg.remove_namespace(group.tag) - id = group.attrib["id"] if "id" in group.attrib else "" - if self.verbose: - print(self.offset_str("Parsing {}#{}".format(tag, id))) - - self.depth+=1 - - transform=self.parseTransform(group) - - #todo process more attributes - appearance=OptimizableSvg.parseAppearance(group,self.defs,self.device) - node=OptimizableSvg.GroupNode(id,transform,appearance,self.settings) - parent.children.append(node) - - if self.verbose: - self.reportSkippedAttribs(group,["id","transform","style"]+OptimizableSvg.appearance_keys) - - skipped_children=[] - for child in group: - if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: - self.parseShape(child,node) - elif OptimizableSvg.remove_namespace(child.tag) == "defs": - self.parseDefs(child) - elif OptimizableSvg.remove_namespace(child.tag) == "style": - self.parseStyle(child) - elif OptimizableSvg.remove_namespace(child.tag) == "g": - self.parseGroup(child,node) - else: - skipped_children.append(child) - - if self.verbose: - self.reportSkippedChildren(group,skipped_children) - - self.depth-=1 - - def parseStyle(self,style_node): - tag = OptimizableSvg.remove_namespace(style_node.tag) - id = style_node.attrib["id"] if "id" in style_node.attrib else "" - if self.verbose: - print(self.offset_str("Parsing {}#{}".format(tag, id))) - - if style_node.attrib["type"] != "text/css": - raise ValueError("Only text/css style recognized, got {}".format(style_node.attrib["type"])) - - self.depth += 1 - - # creating only a dummy node - node = OptimizableSvg.SvgNode(id, None, {}, self.settings) - - if self.verbose: - self.reportSkippedAttribs(def_node, ["id"]) - - if len(style_node)>0: - raise ValueError("Style node should not have children (has {})".format(len(style_node))) - - # collect CSS classes - sheet = cssutils.parseString(style_node.text) - for rule in sheet: - if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): - name = rule.selectorText - if len(name) >= 2 and name[0] == '.': - self.defs[name] = rule.style.getCssText().replace("\n","") - else: - raise ValueError("Unrecognized CSS selector {}".format(name)) - else: - raise ValueError("No style or selector text in CSS rule") - - if self.verbose: - self.reportSkippedChildren(def_node, skipped_children) - - self.depth -= 1 - - def parseDefs(self,def_node): - #only linear gradients are currently supported - tag = OptimizableSvg.remove_namespace(def_node.tag) - id = def_node.attrib["id"] if "id" in def_node.attrib else "" - if self.verbose: - print(self.offset_str("Parsing {}#{}".format(tag, id))) - - self.depth += 1 - - - # creating only a dummy node - node = OptimizableSvg.SvgNode(id, None, {},self.settings) - - if self.verbose: - self.reportSkippedAttribs(def_node, ["id"]) - - skipped_children = [] - for child in def_node: - if OptimizableSvg.remove_namespace(child.tag) == "linearGradient": - 
self.parseGradient(child,node) - elif OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: - raise NotImplementedError("Definition/instantiation of shapes not supported") - elif OptimizableSvg.remove_namespace(child.tag) == "defs": - raise NotImplementedError("Definition within definition not supported") - elif OptimizableSvg.remove_namespace(child.tag) == "g": - raise NotImplementedError("Groups within definition not supported") - else: - skipped_children.append(child) - - if len(node.children)>0: - #take this node out and enter it into defs - self.defs[node.children[0].id]=node.children[0] - node.children.pop() - - - if self.verbose: - self.reportSkippedChildren(def_node, skipped_children) - - self.depth -= 1 - - def parseGradientStop(self,stop): - param_dict={key:value for key,value in stop.attrib.items() if key in ["id","offset","stop-color","stop-opacity"]} - style_dict={} - if "style" in stop.attrib: - style_dict={item.split(":")[0]:item.split(":")[1] for item in stop.attrib["style"].split(";") if len(item)>0} - param_dict.update(style_dict) - - offset=OptimizableSvg.parseOpacity(param_dict["offset"]) - color=OptimizableSvg.parse_color(param_dict["stop-color"]) - opacity=OptimizableSvg.parseOpacity(param_dict["stop-opacity"]) if "stop-opacity" in param_dict else 1. - - return offset, color, opacity - - def parseGradient(self, gradient_node, parent): - tag = OptimizableSvg.remove_namespace(gradient_node.tag) - id = gradient_node.attrib["id"] if "id" in gradient_node.attrib else "" - if self.verbose: - print(self.offset_str("Parsing {}#{}".format(tag, id))) - - self.depth += 1 - if "stop" not in [OptimizableSvg.remove_namespace(child.tag) for child in gradient_node]\ - and "href" not in [OptimizableSvg.remove_namespace(key) for key in gradient_node.attrib.keys()]: - raise ValueError("Gradient {} has neither stops nor a href link to them".format(id)) - - transform=self.parseTransform(gradient_node) - begin=None - end = None - offsets=[] - stops=[] - href=None - - if "x1" in gradient_node.attrib or "y1" in gradient_node.attrib: - begin=np.array([0.,0.]) - if "x1" in gradient_node.attrib: - begin[0] = float(gradient_node.attrib["x1"]) - if "y1" in gradient_node.attrib: - begin[1] = float(gradient_node.attrib["y1"]) - begin = torch.tensor(begin.transpose(),dtype=torch.float32) - - if "x2" in gradient_node.attrib or "y2" in gradient_node.attrib: - end=np.array([0.,0.]) - if "x2" in gradient_node.attrib: - end[0] = float(gradient_node.attrib["x2"]) - if "y2" in gradient_node.attrib: - end[1] = float(gradient_node.attrib["y2"]) - end=torch.tensor(end.transpose(),dtype=torch.float32) - - stop_nodes=[node for node in list(gradient_node) if OptimizableSvg.remove_namespace(node.tag)=="stop"] - if len(stop_nodes)>0: - stop_nodes=sorted(stop_nodes,key=lambda n: float(n.attrib["offset"])) - - for stop in stop_nodes: - offset, color, opacity = self.parseGradientStop(stop) - offsets.append(offset) - stops.append(np.concatenate((color,np.array([opacity])))) - - hkey=next((value for key,value in gradient_node.attrib.items() if OptimizableSvg.remove_namespace(key)=="href"),None) - if hkey is not None: - href=self.defs[hkey.lstrip("#")] - - parent.children.append(OptimizableSvg.GradientNode(id,transform,self.settings,begin.to(self.device) if begin is not None else begin,end.to(self.device) if end is not None else end,torch.tensor(offsets,dtype=torch.float32,device=self.device) if len(offsets)>0 else None,torch.tensor(np.array(stops),dtype=torch.float32,device=self.device) if 
len(stops)>0 else None,href)) - - self.depth -= 1 - - def parseViewport(self, root): - if "width" in root.attrib and "height" in root.attrib: - self.canvas = np.array([int(math.ceil(float(root.attrib["width"]))), int(math.ceil(float(root.attrib["height"])))]) - elif "viewBox" in root.attrib: - s=root.attrib["viewBox"].split(" ") - w=s[2] - h=s[3] - self.canvas = np.array( - [int(math.ceil(float(w))), int(math.ceil(float(h)))]) - else: - raise ValueError("Size information is missing from document definition") - #endregion - - #region writing - def write_xml(self): - tree=self.root.write_xml(self) - - return minidom.parseString(etree.tostring(tree, 'utf-8')).toprettyxml(indent=" ") - - def write_defs(self,root): - if len(self.defs)==0: - return - - defnode = etree.SubElement(root, 'defs') - stylenode = etree.SubElement(root,'style') - stylenode.set('type','text/css') - stylenode.text="" - - defcpy=copy.copy(self.defs) - while len(defcpy)>0: - torem=[] - for key,value in defcpy.items(): - if issubclass(value.__class__,OptimizableSvg.SvgNode): - if value.href is None or value.href not in defcpy: - value.write_xml(defnode) - torem.append(key) - else: - continue - else: - #this is a string, and hence a CSS attribute - stylenode.text+=key+" {"+value+"}\n" - torem.append(key) - - for key in torem: - del defcpy[key] - #endregion - - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/partition.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/partition.h deleted file mode 100644 index fdd158c4cc13ebb81f6fc407276aeada8d1201c5..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/partition.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file partition.h - * \brief Generic implementations of partition functions. 
- */
-
-#pragma once
-
-#include
-#include
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator stable_partition(thrust::execution_policy<DerivedPolicy> &exec,
-                                   ForwardIterator first,
-                                   ForwardIterator last,
-                                   Predicate pred);
-
-template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator stable_partition(thrust::execution_policy<DerivedPolicy> &exec,
-                                   ForwardIterator first,
-                                   ForwardIterator last,
-                                   InputIterator stencil,
-                                   Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator1, typename OutputIterator2, typename Predicate>
-__host__ __device__
-  thrust::pair<OutputIterator1,OutputIterator2>
-    stable_partition_copy(thrust::execution_policy<DerivedPolicy> &exec,
-                          InputIterator first,
-                          InputIterator last,
-                          OutputIterator1 out_true,
-                          OutputIterator2 out_false,
-                          Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator1, typename OutputIterator2, typename Predicate>
-__host__ __device__
-  thrust::pair<OutputIterator1,OutputIterator2>
-    stable_partition_copy(thrust::execution_policy<DerivedPolicy> &exec,
-                          InputIterator1 first,
-                          InputIterator1 last,
-                          InputIterator2 stencil,
-                          OutputIterator1 out_true,
-                          OutputIterator2 out_false,
-                          Predicate pred);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator partition(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            Predicate pred);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator partition(thrust::execution_policy<DerivedPolicy> &exec,
-                            ForwardIterator first,
-                            ForwardIterator last,
-                            InputIterator stencil,
-                            Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator1, typename OutputIterator2, typename Predicate>
-__host__ __device__
-  thrust::pair<OutputIterator1,OutputIterator2>
-    partition_copy(thrust::execution_policy<DerivedPolicy> &exec,
-                   InputIterator first,
-                   InputIterator last,
-                   OutputIterator1 out_true,
-                   OutputIterator2 out_false,
-                   Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator1, typename OutputIterator2, typename Predicate>
-__host__ __device__
-  thrust::pair<OutputIterator1,OutputIterator2>
-    partition_copy(thrust::execution_policy<DerivedPolicy> &exec,
-                   InputIterator1 first,
-                   InputIterator1 last,
-                   InputIterator2 stencil,
-                   OutputIterator1 out_true,
-                   OutputIterator2 out_false,
-                   Predicate pred);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator partition_point(thrust::execution_policy<DerivedPolicy> &exec,
-                                  ForwardIterator first,
-                                  ForwardIterator last,
-                                  Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename Predicate>
-__host__ __device__
-  bool is_partitioned(thrust::execution_policy<DerivedPolicy> &exec,
-                      InputIterator first,
-                      InputIterator last,
-                      Predicate pred);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include
-
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/build_sam.py b/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/build_sam.py
deleted file mode 100644
index e41e064e8dfd11131e833c7a1468a38bc6ea6764..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/build_sam.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
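For context, a hedged usage sketch for the builders declared in this file; the checkpoint filename is a placeholder and the import path assumes the upstream segment_anything package layout:

    # Illustrative only: checkpoint path and import path are assumptions, not taken from this repo.
    import torch
    from segment_anything.build_sam import sam_model_registry

    sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")  # builds ViT-B SAM and loads weights
    sam.to("cuda" if torch.cuda.is_available() else "cpu")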
- -import torch - -from functools import partial - -from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer - - -def build_sam_vit_h(checkpoint=None): - print('build_sam_vit_h') - return _build_sam( - encoder_embed_dim=1280, - encoder_depth=32, - encoder_num_heads=16, - encoder_global_attn_indexes=[7, 15, 23, 31], - checkpoint=checkpoint, - ) - - -def build_sam_vit_l(checkpoint=None): - print('build_sam_vit_l') - return _build_sam( - encoder_embed_dim=1024, - encoder_depth=24, - encoder_num_heads=16, - encoder_global_attn_indexes=[5, 11, 17, 23], - checkpoint=checkpoint, - ) - - -def build_sam_vit_b(checkpoint=None): - print('build_sam_vit_b') - return _build_sam( - encoder_embed_dim=768, - encoder_depth=12, - encoder_num_heads=12, - encoder_global_attn_indexes=[2, 5, 8, 11], - checkpoint=checkpoint, - ) - -build_sam = build_sam_vit_h - - - -sam_model_registry = { - "default": build_sam, - "vit_h": build_sam_vit_h, - "vit_l": build_sam_vit_l, - "vit_b": build_sam_vit_b, -} - - -def _build_sam( - encoder_embed_dim, - encoder_depth, - encoder_num_heads, - encoder_global_attn_indexes, - checkpoint=None, -): - prompt_embed_dim = 256 - image_size = 1024 - vit_patch_size = 16 - image_embedding_size = image_size // vit_patch_size - sam = Sam( - image_encoder=ImageEncoderViT( - depth=encoder_depth, - embed_dim=encoder_embed_dim, - img_size=image_size, - mlp_ratio=4, - norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), - num_heads=encoder_num_heads, - patch_size=vit_patch_size, - qkv_bias=True, - use_rel_pos=True, - global_attn_indexes=encoder_global_attn_indexes, - window_size=14, - out_chans=prompt_embed_dim, - ), - prompt_encoder=PromptEncoder( - embed_dim=prompt_embed_dim, - image_embedding_size=(image_embedding_size, image_embedding_size), - input_image_size=(image_size, image_size), - mask_in_chans=16, - ), - mask_decoder=MaskDecoder( - num_multimask_outputs=3, - transformer=TwoWayTransformer( - depth=2, - embedding_dim=prompt_embed_dim, - mlp_dim=2048, - num_heads=8, - ), - transformer_dim=prompt_embed_dim, - iou_head_depth=3, - iou_head_hidden_dim=256, - ), - pixel_mean=[123.675, 116.28, 103.53], - pixel_std=[58.395, 57.12, 57.375], - ) - sam.eval() - if checkpoint is not None: - with open(checkpoint, "rb") as f: - state_dict = torch.load(f) - sam.load_state_dict(state_dict) - return sam diff --git a/spaces/CarlDennis/HYTTS/app.py b/spaces/CarlDennis/HYTTS/app.py deleted file mode 100644 index de5919c67a7e3dfc8d1b7afac4d8986c29f43210..0000000000000000000000000000000000000000 --- a/spaces/CarlDennis/HYTTS/app.py +++ /dev/null @@ -1,131 +0,0 @@ -import re -import time -import gradio as gr -import torch -import commons -import utils -from models import SynthesizerTrn -from text import text_to_sequence - -config_json = "configs//multi.json" -pth_path = "model//G=728.pth" - -lan = ["中文", "日文", "英文", "德语", "克罗地亚语"] - - -def get_text(text, hps, cleaned=False): - if cleaned: - text_norm = text_to_sequence(text, hps.symbols, []) - else: - text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - -def get_label(text, label): - if f'[{label}]' in text: - return True, text.replace(f'[{label}]', '') - else: - return False, text - - -def sle(language, tts_input0): - if language == "中文": - tts_input1 = "[ZH]" + tts_input0.replace('\n', '。') + "[ZH]" - return tts_input1 - if language == "英文": - tts_input1 = "[EN]" + 
tts_input0.replace('\n', '.') + "[EN]" - return tts_input1 - elif language == "日文": - tts_input1 = "[JA]" + tts_input0.replace('\n', '。') + "[JA]" - return tts_input1 - elif language == "德语": - tts_input1 = "[DE]" + tts_input0.replace('\n', '.') + "[DE]" - return tts_input1 - elif language == "克罗地亚语": - tts_input1 = "[CR]" + tts_input0.replace('\n', '.') + "[CR]" - return tts_input1 - - -def load_model(config_json, pth_path): - dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - hps_ms = utils.get_hparams_from_file(f"{config_json}") - n_speakers = hps_ms.data.n_speakers if 'n_speakers' in hps_ms.data.keys() else 0 - n_symbols = len(hps_ms.symbols) if 'symbols' in hps_ms.keys() else 0 - net_g_ms = SynthesizerTrn( - n_symbols, - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=n_speakers, - **hps_ms.model).to(dev) - _ = net_g_ms.eval() - _ = utils.load_checkpoint(pth_path, net_g_ms) - return net_g_ms - - -net_g_ms = load_model(config_json, pth_path) - - -def infer(language, text, speaker_id, n_scale=0.667, n_scale_w=0.8, l_scale=1): - hps_ms = utils.get_hparams_from_file(f"{config_json}") - stn_tst = get_text(sle(language, text), hps_ms) - speaker_id = int(i_dict[speaker_id]) - dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(dev) - t1 = time.time() - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(dev) - sid = torch.LongTensor([speaker_id]).to(dev) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=n_scale, noise_scale_w=n_scale_w, - length_scale=l_scale)[0][ - 0, 0].data.cpu().float().numpy() - t2 = time.time() - spending_time = "推理时间:" + str(t2 - t1) + "s" - print(spending_time) - return (hps_ms.data.sampling_rate, audio) - - -i_dict = { - "ことり(JAP)": 1, - "うみ(JAP)": 0, - "えり(JAP)": 6, - "小文(CHN)": 9, - "小菊(CHN)": 10, - "小标(CHN)": 11, - "Helena(HRV)": 14, - "Erika(DEU)": 19, - "Diana(ENG)": 26, - "Michelle(ENG)": 30, -} - -idols = [ - "ことり(JAP)", - "うみ(JAP)", - "えり(JAP)", - "小文(CHN)", - "小菊(CHN)", - "小标(CHN)", - "Helena(HRV)", - "Erika(DEU)", - "Diana(ENG)", - "Michelle(ENG)" -] - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("幻音文字转语音"): - tts_input1 = gr.TextArea(label="支持英语、日语、德语、中文、克罗地亚语", value="大家好") - language = gr.Dropdown(label="选择语言", choices=lan, value="中文", interactive=True) - para_input1 = gr.Slider(minimum=0.01, maximum=1.0, label="更改噪声比例", value=0.667) - para_input2 = gr.Slider(minimum=0.01, maximum=1.0, label="更改噪声偏差", value=0.8) - para_input3 = gr.Slider(minimum=0.1, maximum=10, label="更改时间比例", value=1) - tts_submit = gr.Button("Generate", variant="primary") - speaker1 = gr.Dropdown(label="选择说话人", choices=idols, value="小文(CHN)", interactive=True) - tts_output2 = gr.Audio(label="Output") - - tts_submit.click(infer, [language, tts_input1, speaker1, para_input1, para_input2, para_input3], - [tts_output2]) - app.launch() diff --git a/spaces/CarlDennis/Lovelive-VITS-JPZH/text/__init__.py b/spaces/CarlDennis/Lovelive-VITS-JPZH/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/CarlDennis/Lovelive-VITS-JPZH/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/word_eval.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/word_eval.py deleted file mode 100644 index 462875e4d54adf792039b2eac4b83da0c0d07423..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/word_eval.py +++ /dev/null @@ -1,782 +0,0 @@ -import logging -import tempfile -import os -import torch -from collections import OrderedDict -import itertools -from tqdm import tqdm -from .util import io_ -from maskrcnn_benchmark.modeling.roi_heads.boundary_head.inference import Masker -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou - -from maskrcnn_benchmark.config import cfg -from shapely.geometry import * -import cv2 -import numpy as np -import csv -from .alfashape import getAlfaShapes -import torch.nn as nn - - -def do_coco_evaluation( - dataset, - predictions, - box_only, # False - output_folder, - iou_types, # 'segm' - expected_results, # [] - expected_results_sigma_tol, # 4 -): - logger = logging.getLogger("maskrcnn_benchmark.inference") - - if box_only: - logger.info("Evaluating bbox proposals") - areas = {"all": "", "small": "s", "medium": "m", "large": "l"} - res = COCOResults("box_proposal") - for limit in [100, 1000]: - for area, suffix in areas.items(): - stats = evaluate_box_proposals( - predictions, dataset, area=area, limit=limit - ) - key = "AR{}@{:d}".format(suffix, limit) - res.results["box_proposal"][key] = stats["ar"].item() - logger.info(res) - check_expected_results(res, expected_results, expected_results_sigma_tol) - if output_folder: - torch.save(res, os.path.join(output_folder, "box_proposals.pth")) - return - logger.info("Preparing results for COCO format") - coco_results = {} - if "bbox" in iou_types: - logger.info("Preparing bbox results") - coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset) - if "bo" in iou_types: - logger.info("Preparing bo results") - coco_results["bo"] = prepare_for_boundary_segmentation(predictions, dataset) - logger.info("Do not apply evaluating predictions") - for iou_type in iou_types: - with tempfile.NamedTemporaryFile() as f: - file_path = f.name - if output_folder: - if not os.path.isdir(output_folder): - print('creating dir: ' + output_folder) - os.mkdir(output_folder) - file_path = os.path.join(output_folder, iou_type + ".json") - res = evaluate_predictions_on_coco( - dataset.coco, coco_results[iou_type], file_path, iou_type - ) - - return None - - -def prepare_for_coco_detection(predictions, dataset): - # assert isinstance(dataset, COCODataset) - coco_results = [] - for image_id, prediction in enumerate(predictions): - original_id = 
dataset.id_to_img_map[image_id] - if len(prediction) == 0: - continue - - # TODO replace with get_img_info? - image_width = dataset.coco.imgs[original_id]["width"] - image_height = dataset.coco.imgs[original_id]["height"] - prediction = prediction.resize((image_width, image_height)) - prediction = prediction.convert("xywh") - - boxes = prediction.bbox.tolist() - scores = prediction.get_field("scores").tolist() - labels = prediction.get_field("labels").tolist() - - mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels] - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": mapped_labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return coco_results - - -def contour_to_xys(cnt, image_shape): - """Convert rect to xys, i.e., eight points - The `image_shape` is used to to make sure all points return are valid, i.e., within image area - """ - rect = cv2.minAreaRect(cnt) - h, w = image_shape[0:2] - - def get_valid_x(x): - if x < 0: - return 0 - if x >= w: - return w - 1 - return x - - def get_valid_y(y): - if y < 0: - return 0 - if y >= h: - return h - 1 - return y - - points = cv2.boxPoints(rect) - points = np.int0(points) - for i_xy, (x, y) in enumerate(points): - x = get_valid_x(x) - y = get_valid_y(y) - points[i_xy, :] = [x, y] - points = np.reshape(points, -1) - return points - - -def contour_to_valid(cnt, image_shape): - """Convert rect to xys, i.e., eight points - The `image_shape` is used to to make sure all points return are valid, i.e., within image area - """ - # rect = cv2.minAreaRect(cnt) - if len(cnt.shape) != 3: - assert 1 < 0 - rect = cnt.reshape([cnt.shape[0], cnt.shape[2]]) - h, w = image_shape[0:2] - - def get_valid_x(x): - if x < 0: - return 0 - if x >= w: - return w - 1 - return x - - def get_valid_y(y): - if y < 0: - return 0 - if y >= h: - return h - 1 - return y - for i_xy, (x, y) in enumerate(rect): - x = get_valid_x(x) - y = get_valid_y(y) - rect[i_xy, :] = [x, y] - - points = np.reshape(rect, -1) - return points - - -def _nms_y(heat, kernel=3): - pad = (kernel - 1) // 2 - hmax = nn.functional.max_pool2d( - heat, (1, kernel), stride=1, padding=(0, pad)) - keep = (hmax == heat).float() - return heat * keep - - -def _nms_x(heat, kernel=3): - pad = (kernel - 1) // 2 - hmax = nn.functional.max_pool2d( - heat, (kernel, 1), stride=1, padding=(pad, 0)) - keep = (hmax == heat).float() - return heat * keep - -def CTW_order_lr(map_in): - - line_out_l2r = [] - line_out_r2l = [] - - map_in = torch.tensor(map_in) - value, top = torch.topk(map_in, 2, dim=0) - value = value.numpy() - top = top.numpy() - top_th = np.where(value[1] > 0.1)[0] # L - # print(top_th) - if len(top_th) == 0: - return [] - top1 = np.sort(top, axis=0) - for i in range(len(top_th)): - line_out_l2r.append([top_th[i], top1[0][top_th[i]]]) - line_out_r2l.append([top_th[i], top1[1][top_th[i]]]) - line_out = line_out_l2r+line_out_r2l[::-1] - # print(line_out) - return line_out - -def CTW_order_bt(map_in): - - line_out_t2b = [] - line_out_b2t = [] - - map_in = torch.tensor(map_in) - value, top = torch.topk(map_in, 2, dim=1) - value = value.numpy() - top = top.numpy() - top_th = np.where(value[:, 1] > 0.1)[0] # H - if len(top_th) == 0: - return [] - top1 = np.sort(top, axis=1) - for i in range(len(top_th)): - line_out_b2t.append([top1[top_th[i]][0], top_th[i]]) - line_out_t2b.append([top1[top_th[i]][1], top_th[i]]) - line_out = line_out_b2t[::-1] + line_out_t2b - # print(line_out) - return line_out - -def boundary_to_mask_ic(bo_x, 
bo_y, name, num): - - # NMS Hmap and Vmap - Vmap = _nms_x(bo_x, kernel=5) - Hmap = _nms_y(bo_y, kernel=3) - Vmap = Vmap[0] - Hmap = Hmap[0] - ploys_Alfa_x = Vmap.clone().numpy() - ploys_Alfa_y = Hmap.clone().numpy() - - # Threshold Hmap and Vmap - thresh = 0.5 - ploys_Alfa_x[ploys_Alfa_x < thresh] = 0 - ploys_Alfa_x[ploys_Alfa_x >= thresh] = 1 - ploys_Alfa_y[ploys_Alfa_y < thresh] = 0 - ploys_Alfa_y[ploys_Alfa_y >= thresh] = 1 - # Output points with strong texture inforamtion in both maps - ploys_Alfa = ploys_Alfa_x + ploys_Alfa_y - ploys_Alfa[ploys_Alfa < 2] = 0 - ploys_Alfa[ploys_Alfa == 2] = 1 - img_draw = np.zeros([ploys_Alfa_y.shape[-1], ploys_Alfa_y.shape[-1]], dtype=np.uint8) - - # calculate polygon by Alpha-Shape Algorithm - if ploys_Alfa.sum() == 0: - return img_draw - ploys_Alfa_inds = np.argwhere(ploys_Alfa == 1) - zero_detect_x = ploys_Alfa_inds[:, 0] - ploys_Alfa_inds[0, 0] - zero_detect_y = ploys_Alfa_inds[:, 1] - ploys_Alfa_inds[0, 1] - if np.where(zero_detect_x != 0)[0].shape[0] == 0 or np.where(zero_detect_y != 0)[0].shape[0] == 0 or \ - ploys_Alfa_inds.shape[0] < 4: - draw_line = ploys_Alfa_inds[np.newaxis, np.newaxis, :, :] - cv2.fillPoly(img_draw, draw_line, 1) - return img_draw - ploys_Alfa_inds = ploys_Alfa_inds.tolist() - ploys_Alfa_inds = [tuple(ploys_Alfa_ind) for ploys_Alfa_ind in ploys_Alfa_inds] - lines = getAlfaShapes(ploys_Alfa_inds, alfas=[1]) - draw_line = np.array(lines) - if len(draw_line.shape) == 4: - if draw_line.shape[1] == 1: - draw_line[0, 0, :, :] = draw_line[0, 0, :, ::-1] - cv2.fillPoly(img_draw, draw_line, 1) - else: - i_draw = 0 - for draw_l in draw_line[0]: - img_draw_new = np.zeros([28, 28], dtype=np.uint8) - draw_l = draw_l[np.newaxis, np.newaxis, :, :] - cv2.fillPoly(img_draw, np.int32(draw_l), 1) - cv2.fillPoly(img_draw_new, np.int32(draw_l), 1) - i_draw += 1 - - else: - for i, line in enumerate(lines[0]): - draw_line = np.array(line) - draw_line = draw_line[np.newaxis, np.newaxis, :, :] - draw_line[0, 0, :, :] = draw_line[0, 0, :, ::-1] - cv2.fillPoly(img_draw, draw_line, 1) - return img_draw - -def boundary_to_mask_ctw(bo_x,bo_y, name, num, image_name_name,p_temp_box): - w_half = (p_temp_box[2] - p_temp_box[0]) * .5 - h_half = (p_temp_box[3] - p_temp_box[1]) * .5 - thresh_total = 0.5 - - if w_half >= h_half: - # point re-scoring - bo_x = _nms_x(bo_x, kernel=9) - bo_x = bo_x[0] - bo_y = bo_y[0] - ploys_Alfa_x = bo_x.clone().numpy() - ploys_Alfa_y = bo_y.clone().numpy() - thresh_x = thresh_total - thresh_y = thresh_total - ploys_Alfa_x_1 = bo_x.clone().numpy() - ploys_Alfa_y_1 = bo_y.clone().numpy() - ploys_Alfa__1 = ploys_Alfa_x_1 + ploys_Alfa_y_1 - ploys_Alfa_x[ploys_Alfa_x < thresh_x] = 0 - ploys_Alfa_x[ploys_Alfa_x >= thresh_x] = 1 - ploys_Alfa_y[ploys_Alfa_y < thresh_y] = 0 - ploys_Alfa_y[ploys_Alfa_y >= thresh_y] = 1 - ploys_Alfa = ploys_Alfa_x + ploys_Alfa_y - ploys_Alfa[ploys_Alfa < 2] = 0 - ploys_Alfa[ploys_Alfa == 2] = 1 - ploys_Alfa *= ploys_Alfa__1 - # rebuild text region from contour points - img_draw = np.zeros([ploys_Alfa_y.shape[-1], ploys_Alfa_y.shape[-1]], dtype=np.uint8) - if ploys_Alfa.sum() == 0: - return img_draw - lines = CTW_order_lr(ploys_Alfa) - else: - bo_y = _nms_y(bo_y,kernel=9) - bo_x = bo_x[0] - bo_y = bo_y[0] - ploys_Alfa_x = bo_x.clone().numpy() - ploys_Alfa_y = bo_y.clone().numpy() - thresh_x = thresh_total - thresh_y = thresh_total - ploys_Alfa_x_1 = bo_x.clone().numpy() - ploys_Alfa_y_1 = bo_y.clone().numpy() - ploys_Alfa__1 = ploys_Alfa_x_1 + ploys_Alfa_y_1 - ploys_Alfa_x[ploys_Alfa_x < thresh_x] = 0 - 
ploys_Alfa_x[ploys_Alfa_x >= thresh_x] = 1 - ploys_Alfa_y[ploys_Alfa_y < thresh_y] = 0 - ploys_Alfa_y[ploys_Alfa_y >= thresh_y] = 1 - ploys_Alfa = ploys_Alfa_x + ploys_Alfa_y - ploys_Alfa[ploys_Alfa < 2] = 0 - ploys_Alfa[ploys_Alfa == 2] = 1 - ploys_Alfa *= ploys_Alfa__1 - img_draw = np.zeros([ploys_Alfa_y.shape[-1], ploys_Alfa_y.shape[-1]], dtype=np.uint8) - if ploys_Alfa.sum() == 0: - return img_draw - lines = CTW_order_bt(ploys_Alfa) - if len(lines) <=10: - return img_draw - draw_line = np.array(lines) - draw_line = draw_line[np.newaxis, np.newaxis, :, :] - cv2.fillPoly(img_draw, draw_line, 1) - img_draw = img_draw.astype(np.uint8) - kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) - img_draw = cv2.morphologyEx(img_draw, cv2.MORPH_CLOSE, kernel) - return img_draw - -def mask_to_roRect(mask, img_shape): - ## convert mask into rotated rect - e = mask[0, :, :] - _, countours, hier = cv2.findContours(e.clone().numpy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Aarlog - if len(countours) == 0: - return np.zeros((1, 8)) - t_c = countours[0].copy() - quad = contour_to_xys(t_c, img_shape) - return quad - - -def mask_to_contours(mask, img_shape): - e = mask[0, :, :] - - _, countours, hier = cv2.findContours(e.clone().numpy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Aarlog - - if len(countours) == 0: - return np.zeros((1, 8)) - t_c = countours[0].copy() - quad = contour_to_valid(t_c, img_shape) - return quad - - -def write_result_as_txt(image_name, bboxes, path): - if not os.path.exists(path): - os.makedirs(path) - - filename = io_.join_path(path, '%s.txt' % (image_name)) - lines = [] - for b_idx, bbox in enumerate(bboxes): - if len(bbox) < 6: - continue - values = [int(v) for v in bbox] - # line = "%d, %d, %d, %d, %d, %d, %d, %d\n"%tuple(values) - line = "%d" % values[0] - for v_id in range(1, len(values)): - line += ", %d" % values[v_id] - line += '\n' - lines.append(line) - io_.write_lines(filename, lines) - - -def prepare_for_boundary_segmentation(predictions, dataset): - import pycocotools.mask as mask_util - import numpy as np - - masker = Masker(threshold=0.5, padding=1) - coco_results = [] - - for image_id, prediction in tqdm(enumerate(predictions)): - original_id = dataset.id_to_img_map[image_id] - image_name = dataset.coco.imgs[original_id]["file_name"].split('.')[0] - im_w_name = dataset.coco.imgs[original_id]["file_name"] - if len(prediction) == 0: - continue - - # TODO replace with get_img_info? - image_width = dataset.coco.imgs[original_id]["width"] - image_height = dataset.coco.imgs[original_id]["height"] - prediction = prediction.resize((image_width, image_height)) - masks_x = prediction.get_field("mask_x") - masks_y = prediction.get_field("mask_y") - - if 'ic15' in cfg.DATASETS.TEST[0]: - masks = [boundary_to_mask_ic(mask_x, mask_y, dataset.coco.imgs[original_id]["file_name"], number) for - mask_x, mask_y, number in zip(masks_x, masks_y,list(range(masks_x.shape[0])))] - elif 'CTW' in cfg.DATASETS.TEST[0]: - masks = [boundary_to_mask_ctw(mask_x, mask_y, dataset.coco.imgs[original_id]["file_name"], number, image_name, - p_temp) for - mask_x, mask_y, number, p_temp in zip(masks_x, masks_y, - list(range(masks_x.shape[0])), prediction.bbox)] - else: - print('Please add your own construction code!') - assert 1<0 - - masks = torch.from_numpy(np.array(masks)[:, np.newaxis, :, :]) - # Masker is necessary only if masks haven't been already resized. 
- if list(masks.shape[-2:]) != [image_height, image_width]: - masks = masker(masks.expand(1, -1, -1, -1, -1), prediction) - masks = masks[0] - - scores = prediction.get_field("scores").tolist() - labels = prediction.get_field("labels").tolist() - if 'ic15' in cfg.DATASETS.TEST[0]: - rects = [mask_to_roRect(mask, [image_height, image_width]) for mask in masks] - if 'CTW' in cfg.DATASETS.TEST[0]: - contours = [mask_to_contours(mask, [image_height, image_width]) for mask in masks] - # output for evaluation - write_result_as_txt(image_name, contours, './output/ctw/results.txt') - # visualization - if cfg.DATASETS.Test_Visual: - im_write = cv2.imread( - '../ct/dataset/ctw/ctw_test_images/' + im_w_name)[:, :,::-1] - for box in contours: - box = np.array(box) - box = np.around(box).astype(np.int32) - cv2.polylines(im_write[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(0, 255, 0), thickness=2) # 0,255,255 y 0,255,0 g - cv2.imwrite('./det_visual/' + im_w_name,im_write[:, :, ::-1]) - - if 'ic15' in cfg.DATASETS.TEST[0]: - mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels] - esd = [] - for k, rect in enumerate(rects): - if rect.all() == 0: - continue - else: - esd.append( - { - "image_id": original_id, - "category_id": mapped_labels[k], - "seg_rorect": rect.tolist(), - "score": scores[k], - } - ) - if cfg.PROCESS.PNMS: - pnms_thresh = cfg.PROCESS.NMS_THRESH - keep = esd_pnms(esd, pnms_thresh) - new_esd = [] - for i in keep: - new_esd.append(esd[i]) - coco_results.extend(new_esd) - # visualization - if cfg.DATASETS.Test_Visual: - im_write = cv2.imread( - '../ct/dataset/ic15/ic15_test_images/' + im_w_name)[ - :, :, ::-1] - for i in keep: - box = esd[i] - # print(box) - # assert 1<0 - box = np.array(box['seg_rorect']) - box = np.around(box).astype(np.int32) - cv2.polylines(im_write[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, - color=(0, 255, 0), thickness=2) # 0,255,255 y 0,255,0 g - cv2.imwrite('./det_visual/' + im_w_name, im_write[:, :, ::-1]) - else: - coco_results.extend(esd) - - - return coco_results - -def ke_to_quad(ke, mty, img_shape): - mt = mty[:].argmax() - quad = paraToQuad_v3(ke, mt) - return quad - - -# polynms -def py_cpu_pnms(dets, scores, thresh): - pts = [] - for det in dets: - pts.append([[det[i][0], det[i][1]] for i in range(len(det))]) - order = scores.argsort()[::-1] - areas = np.zeros(scores.shape) - order = scores.argsort()[::-1] - inter_areas = np.zeros((scores.shape[0], scores.shape[0])) - for il in range(len(pts)): - poly = Polygon(pts[il]) - areas[il] = poly.area - for jl in range(il, len(pts)): - polyj = Polygon(pts[jl]) - try: - inS = poly.intersection(polyj) - except: - print(poly, polyj) - inter_areas[il][jl] = inS.area - inter_areas[jl][il] = inS.area - - keep = [] - while order.size > 0: - i = order[0] - keep.append(i) - ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]]) - inds = np.where(ovr <= thresh)[0] - order = order[inds + 1] - return keep - - -def esd_pnms(esd, pnms_thresh): - scores = [] - dets = [] - for ele in esd: - score = ele['score'] - quad = ele['seg_rorect'] - # det = np.array([[quad[0][0], quad[0][1]], [quad[1][0], quad[1][1]],[quad[2][0], quad[2][1]],[quad[3][0], quad[3][1]]]) - det = np.array([[quad[0], quad[1]], [quad[2], quad[3]], [quad[4], quad[5]], [quad[6], quad[7]]]) - scores.append(score) - dets.append(det) - scores = np.array(scores) - dets = np.array(dets) - keep = py_cpu_pnms(dets, scores, pnms_thresh) - return keep - - -# inspired 
from Detectron -def evaluate_box_proposals( - predictions, dataset, thresholds=None, area="all", limit=None -): - """Evaluate detection proposal recall metrics. This function is a much - faster alternative to the official COCO API recall evaluation code. However, - it produces slightly different results. - """ - # Record max overlap value for each gt box - # Return vector of overlap values - areas = { - "all": 0, - "small": 1, - "medium": 2, - "large": 3, - "96-128": 4, - "128-256": 5, - "256-512": 6, - "512-inf": 7, - } - area_ranges = [ - [0 ** 2, 1e5 ** 2], # all - [0 ** 2, 32 ** 2], # small - [32 ** 2, 96 ** 2], # medium - [96 ** 2, 1e5 ** 2], # large - [96 ** 2, 128 ** 2], # 96-128 - [128 ** 2, 256 ** 2], # 128-256 - [256 ** 2, 512 ** 2], # 256-512 - [512 ** 2, 1e5 ** 2], - ] # 512-inf - assert area in areas, "Unknown area range: {}".format(area) - area_range = area_ranges[areas[area]] - gt_overlaps = [] - num_pos = 0 - - for image_id, prediction in enumerate(predictions): - original_id = dataset.id_to_img_map[image_id] - - # TODO replace with get_img_info? - image_width = dataset.coco.imgs[original_id]["width"] - image_height = dataset.coco.imgs[original_id]["height"] - prediction = prediction.resize((image_width, image_height)) - - # sort predictions in descending order - # TODO maybe remove this and make it explicit in the documentation - inds = prediction.get_field("objectness").sort(descending=True)[1] - prediction = prediction[inds] - - ann_ids = dataset.coco.getAnnIds(imgIds=original_id) - anno = dataset.coco.loadAnns(ann_ids) - gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0] - gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes - gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert( - "xyxy" - ) - gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) - - if len(gt_boxes) == 0: - continue - - valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) - gt_boxes = gt_boxes[valid_gt_inds] - - num_pos += len(gt_boxes) - - if len(gt_boxes) == 0: - continue - - if len(prediction) == 0: - continue - - if limit is not None and len(prediction) > limit: - prediction = prediction[:limit] - - overlaps = boxlist_iou(prediction, gt_boxes) - - _gt_overlaps = torch.zeros(len(gt_boxes)) - for j in range(min(len(prediction), len(gt_boxes))): - # find which proposal box maximally covers each gt box - # and get the iou amount of coverage for each gt box - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - - # find which gt box is 'best' covered (i.e. 
'best' = most iou) - gt_ovr, gt_ind = max_overlaps.max(dim=0) - assert gt_ovr >= 0 - # find the proposal box that covers the best covered gt box - box_ind = argmax_overlaps[gt_ind] - # record the iou coverage of this gt box - _gt_overlaps[j] = overlaps[box_ind, gt_ind] - assert _gt_overlaps[j] == gt_ovr - # mark the proposal box and the gt box as used - overlaps[box_ind, :] = -1 - overlaps[:, gt_ind] = -1 - - # append recorded iou coverage level - gt_overlaps.append(_gt_overlaps) - gt_overlaps = torch.cat(gt_overlaps, dim=0) - gt_overlaps, _ = torch.sort(gt_overlaps) - - if thresholds is None: - step = 0.05 - thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) - recalls = torch.zeros_like(thresholds) - # compute recall for each iou threshold - for i, t in enumerate(thresholds): - recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) - # ar = 2 * np.trapz(recalls, thresholds) - ar = recalls.mean() - return { - "ar": ar, - "recalls": recalls, - "thresholds": thresholds, - "gt_overlaps": gt_overlaps, - "num_pos": num_pos, - } - - -def evaluate_predictions_on_coco( - coco_gt, coco_results, json_result_file, iou_type="bbox" -): - import json - - print('writing results to ' + json_result_file) - with open(json_result_file, "w") as f: - json.dump(coco_results, f) - - # from pycocotools.cocoeval import COCOeval - - # coco_dt = coco_gt.loadRes(str(json_result_file)) - # # coco_dt = coco_gt.loadRes(coco_results) - # coco_eval = COCOeval(coco_gt, coco_dt, iou_type) - # coco_eval.evaluate() - # coco_eval.accumulate() - # coco_eval.summarize() - # return coco_eval - return None - - -class COCOResults(object): - METRICS = { - "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "box_proposal": [ - "AR@100", - "ARs@100", - "ARm@100", - "ARl@100", - "AR@1000", - "ARs@1000", - "ARm@1000", - "ARl@1000", - ], - "keypoint": ["AP", "AP50", "AP75", "APm", "APl"], - } - - def __init__(self, *iou_types): - allowed_types = ("box_proposal", "bbox", "segm") - assert all(iou_type in allowed_types for iou_type in iou_types) - results = OrderedDict() - for iou_type in iou_types: - results[iou_type] = OrderedDict( - [(metric, -1) for metric in COCOResults.METRICS[iou_type]] - ) - self.results = results - - def update(self, coco_eval): - if coco_eval is None: - return - from pycocotools.cocoeval import COCOeval - - assert isinstance(coco_eval, COCOeval) - s = coco_eval.stats - iou_type = coco_eval.params.iouType - res = self.results[iou_type] - metrics = COCOResults.METRICS[iou_type] - for idx, metric in enumerate(metrics): - res[metric] = s[idx] - - def __repr__(self): - # TODO make it pretty - return repr(self.results) - - -def check_expected_results(results, expected_results, sigma_tol): - if not expected_results: - return - - logger = logging.getLogger("maskrcnn_benchmark.inference") - for task, metric, (mean, std) in expected_results: - actual_val = results.results[task][metric] - lo = mean - sigma_tol * std - hi = mean + sigma_tol * std - ok = (lo < actual_val) and (actual_val < hi) - msg = ( - "{} > {} sanity check (actual vs. expected): " - "{:.3f} vs. 
mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})" - ).format(task, metric, actual_val, mean, std, lo, hi) - if not ok: - msg = "FAIL: " + msg - logger.error(msg) - else: - msg = "PASS: " + msg - logger.info(msg) - - -def paraToQuad_v3(kes, mt): - ms = (kes[0, 0], kes[6, 0]) - xs = [kes[i, 0] for i in range(1, 5)] # 1 2 3 4 - ys = [kes[i, 0] for i in range(7, 11)] # 7 8 9 10 - crs = (kes[5, 0], kes[11, 0]) - ms = Point(ms) - crs = Point(crs) - vp = [] - all_types = [[1, 2, 3, 4], [1, 2, 4, 3], [1, 3, 2, 4], [1, 3, 4, 2], [1, 4, 2, 3], [1, 4, 3, 2], \ - [2, 1, 3, 4], [2, 1, 4, 3], [2, 3, 1, 4], [2, 3, 4, 1], [2, 4, 1, 3], [2, 4, 3, 1], \ - [3, 1, 2, 4], [3, 1, 4, 2], [3, 2, 1, 4], [3, 2, 4, 1], [3, 4, 1, 2], [3, 4, 2, 1], \ - [4, 1, 2, 3], [4, 1, 3, 2], [4, 2, 1, 3], [4, 2, 3, 1], [4, 3, 1, 2], [4, 3, 2, 1]] - all_types = [[all_types[iat][0] - 1, all_types[iat][1] - 1, all_types[iat][2] - 1, all_types[iat][3] - 1] for iat in - range(24)] - - tpe = all_types[mt] - p1 = Point((xs[0], ys[tpe[0]])) - p2 = Point((xs[1], ys[tpe[1]])) - p3 = Point((xs[2], ys[tpe[2]])) - p4 = Point((xs[3], ys[tpe[3]])) - pts = [p1, p2, p3, p4] - scs = [0, 1, 2, 3] - for it in itertools.permutations(scs, 4): - poly = Polygon([(pts[it[0]].x, pts[it[0]].y), (pts[it[1]].x, pts[it[1]].y),(pts[it[2]].x, pts[it[2]].y), (pts[it[3]].x, pts[it[3]].y)]) - if poly.is_valid and ms.within(poly) and crs.within(poly): - quad = [(pts[it[0]].x, pts[it[0]].y), (pts[it[1]].x, pts[it[1]].y),(pts[it[2]].x, pts[it[2]].y), (pts[it[3]].x, pts[it[3]].y)] - lr = LinearRing(quad) - if lr.is_ccw: - return [(int(iq[0]), int(iq[1])) for iq in quad] - else: - quad = [quad[0], quad[3], quad[2], quad[1]] - return [(int(iq[0]), int(iq[1])) for iq in quad] - - return [(int(iq[0]), int(iq[1])) for iq in quad] - - return None - diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/data_utils.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/data_utils.py deleted file mode 100644 index 8fe6a567bae667f00ef0ee1d4d9075649107b471..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/data_utils.py +++ /dev/null @@ -1,196 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import gzip -import logging -import os -import random as rnd -import tarfile -import zipfile -import random -from typing import List -from tqdm import tqdm - -import decord -from decord import VideoReader -import webdataset as wds -import numpy as np -import torch -from torch.utils.data.dataset import IterableDataset - -from video_llama.common.registry import registry -from video_llama.datasets.datasets.base_dataset import ConcatDataset - - -decord.bridge.set_bridge("torch") -MAX_INT = registry.get("MAX_INT") - - -class ChainDataset(wds.DataPipeline): - r"""Dataset for chaining multiple :class:`DataPipeline` s. - - This class is useful to assemble different existing dataset streams. The - chaining operation is done on-the-fly, so concatenating large-scale - datasets with this class will be efficient. 
- - Args: - datasets (iterable of IterableDataset): datasets to be chained together - """ - def __init__(self, datasets: List[wds.DataPipeline]) -> None: - super().__init__() - self.datasets = datasets - self.prob = [] - self.names = [] - for dataset in self.datasets: - if hasattr(dataset, 'name'): - self.names.append(dataset.name) - else: - self.names.append('Unknown') - if hasattr(dataset, 'sample_ratio'): - self.prob.append(dataset.sample_ratio) - else: - self.prob.append(1) - logging.info("One of the datapipeline doesn't define ratio and set to 1 automatically.") - - def __iter__(self): - datastreams = [iter(dataset) for dataset in self.datasets] - while True: - select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0] - yield next(select_datastream) - - -def apply_to_sample(f, sample): - if len(sample) == 0: - return {} - - def _apply(x): - if torch.is_tensor(x): - return f(x) - elif isinstance(x, dict): - return {key: _apply(value) for key, value in x.items()} - elif isinstance(x, list): - return [_apply(x) for x in x] - else: - return x - - return _apply(sample) - - -def move_to_cuda(sample): - def _move_to_cuda(tensor): - return tensor.cuda() - - return apply_to_sample(_move_to_cuda, sample) - - -def prepare_sample(samples, cuda_enabled=True): - if cuda_enabled: - samples = move_to_cuda(samples) - - # TODO fp16 support - - return samples - - -def reorg_datasets_by_split(datasets): - """ - Organizes datasets by split. - - Args: - datasets: dict of torch.utils.data.Dataset objects by name. - - Returns: - Dict of datasets by split {split_name: List[Datasets]}. - """ - # if len(datasets) == 1: - # return datasets[list(datasets.keys())[0]] - # else: - reorg_datasets = dict() - - # reorganize by split - for _, dataset in datasets.items(): - for split_name, dataset_split in dataset.items(): - if split_name not in reorg_datasets: - reorg_datasets[split_name] = [dataset_split] - else: - reorg_datasets[split_name].append(dataset_split) - - return reorg_datasets - - -def concat_datasets(datasets): - """ - Concatenates multiple datasets into a single dataset. - - It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support - generic IterableDataset because it requires creating separate samplers. - - Now only supports conctenating training datasets and assuming validation and testing - have only a single dataset. This is because metrics should not be computed on the concatenated - datasets. - - Args: - datasets: dict of torch.utils.data.Dataset objects by split. - - Returns: - Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets, - "val" and "test" remain the same. - - If the input training datasets contain both map-style and DataPipeline datasets, returns - a tuple, where the first element is a concatenated map-style dataset and the second - element is a chained DataPipeline dataset. 
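# [Illustrative sketch added for clarity; not part of the deleted file] The
# ChainDataset defined above interleaves several WebDataset pipelines by
# repeatedly picking one stream at random, weighted by each dataset's
# `sample_ratio`. The core of that sampling loop, reproduced in isolation
# (the helper name is hypothetical; only the `random.choices` step mirrors
# the original logic):
import random

def weighted_chain(iterators, weights):
    """Yield items by sampling one iterator per step, proportionally to weights."""
    while True:
        stream = random.choices(iterators, weights=weights, k=1)[0]
        yield next(stream)

# e.g. two streams drawn in a roughly 3:1 ratio:
# gen = weighted_chain([iter(range(100)), iter("abc" * 100)], weights=[3, 1])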
- - """ - # concatenate datasets in the same split - for split_name in datasets: - if split_name != "train": - assert ( - len(datasets[split_name]) == 1 - ), "Do not support multiple {} datasets.".format(split_name) - datasets[split_name] = datasets[split_name][0] - else: - iterable_datasets, map_datasets = [], [] - for dataset in datasets[split_name]: - if isinstance(dataset, wds.DataPipeline): - logging.info( - "Dataset {} is IterableDataset, can't be concatenated.".format( - dataset - ) - ) - iterable_datasets.append(dataset) - elif isinstance(dataset, IterableDataset): - raise NotImplementedError( - "Do not support concatenation of generic IterableDataset." - ) - else: - map_datasets.append(dataset) - - # if len(iterable_datasets) > 0: - # concatenate map-style datasets and iterable-style datasets separately - if len(iterable_datasets) > 1: - chained_datasets = ( - ChainDataset(iterable_datasets) - ) - elif len(iterable_datasets) == 1: - chained_datasets = iterable_datasets[0] - else: - chained_datasets = None - - concat_datasets = ( - ConcatDataset(map_datasets) if len(map_datasets) > 0 else None - ) - - train_datasets = concat_datasets, chained_datasets - train_datasets = tuple([x for x in train_datasets if x is not None]) - train_datasets = ( - train_datasets[0] if len(train_datasets) == 1 else train_datasets - ) - - datasets[split_name] = train_datasets - - return datasets - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/errors.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/errors.py deleted file mode 100644 index 4f30f901babed2b985ae5c333420b6a9e7a3baa8..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/errors.py +++ /dev/null @@ -1,219 +0,0 @@ -import textwrap - - -class VarLibError(Exception): - """Base exception for the varLib module.""" - - -class VarLibValidationError(VarLibError): - """Raised when input data is invalid from varLib's point of view.""" - - -class VarLibMergeError(VarLibError): - """Raised when input data cannot be merged into a variable font.""" - - def __init__(self, merger=None, **kwargs): - self.merger = merger - if not kwargs: - kwargs = {} - if "stack" in kwargs: - self.stack = kwargs["stack"] - del kwargs["stack"] - else: - self.stack = [] - self.cause = kwargs - - @property - def reason(self): - return self.__doc__ - - def _master_name(self, ix): - if self.merger is not None: - ttf = self.merger.ttfs[ix] - if "name" in ttf and ttf["name"].getBestFullName(): - return ttf["name"].getBestFullName() - elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"): - return ttf.reader.file.name - return f"master number {ix}" - - @property - def offender(self): - if "expected" in self.cause and "got" in self.cause: - index = [x == self.cause["expected"] for x in self.cause["got"]].index( - False - ) - master_name = self._master_name(index) - if "location" in self.cause: - master_name = f"{master_name} ({self.cause['location']})" - return index, master_name - return None, None - - @property - def details(self): - if "expected" in self.cause and "got" in self.cause: - offender_index, offender = self.offender - got = self.cause["got"][offender_index] - return f"Expected to see {self.stack[0]}=={self.cause['expected']!r}, instead saw {got!r}\n" - return "" - - def __str__(self): - offender_index, offender = self.offender - location = "" - if offender: - location = f"\n\nThe problem is likely to be in {offender}:\n" 
- context = "".join(reversed(self.stack)) - basic = textwrap.fill( - f"Couldn't merge the fonts, because {self.reason}. " - f"This happened while performing the following operation: {context}", - width=78, - ) - return "\n\n" + basic + location + self.details - - -class ShouldBeConstant(VarLibMergeError): - """some values were different, but should have been the same""" - - @property - def details(self): - basic_message = super().details - - if self.stack[0] != ".FeatureCount" or self.merger is None: - return basic_message - - assert self.stack[0] == ".FeatureCount" - offender_index, _ = self.offender - bad_ttf = self.merger.ttfs[offender_index] - good_ttf = next( - ttf - for ttf in self.merger.ttfs - if self.stack[-1] in ttf - and ttf[self.stack[-1]].table.FeatureList.FeatureCount - == self.cause["expected"] - ) - - good_features = [ - x.FeatureTag - for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord - ] - bad_features = [ - x.FeatureTag - for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord - ] - return basic_message + ( - "\nIncompatible features between masters.\n" - f"Expected: {', '.join(good_features)}.\n" - f"Got: {', '.join(bad_features)}.\n" - ) - - -class FoundANone(VarLibMergeError): - """one of the values in a list was empty when it shouldn't have been""" - - @property - def offender(self): - index = [x is None for x in self.cause["got"]].index(True) - return index, self._master_name(index) - - @property - def details(self): - cause, stack = self.cause, self.stack - return f"{stack[0]}=={cause['got']}\n" - - -class NotANone(VarLibMergeError): - """one of the values in a list was not empty when it should have been""" - - @property - def offender(self): - index = [x is not None for x in self.cause["got"]].index(True) - return index, self._master_name(index) - - @property - def details(self): - cause, stack = self.cause, self.stack - return f"{stack[0]}=={cause['got']}\n" - - -class MismatchedTypes(VarLibMergeError): - """data had inconsistent types""" - - -class LengthsDiffer(VarLibMergeError): - """a list of objects had inconsistent lengths""" - - -class KeysDiffer(VarLibMergeError): - """a list of objects had different keys""" - - -class InconsistentGlyphOrder(VarLibMergeError): - """the glyph order was inconsistent between masters""" - - -class InconsistentExtensions(VarLibMergeError): - """the masters use extension lookups in inconsistent ways""" - - -class UnsupportedFormat(VarLibMergeError): - """an OpenType subtable (%s) had a format I didn't expect""" - - def __init__(self, merger=None, **kwargs): - super().__init__(merger, **kwargs) - if not self.stack: - self.stack = [".Format"] - - @property - def reason(self): - s = self.__doc__ % self.cause["subtable"] - if "value" in self.cause: - s += f" ({self.cause['value']!r})" - return s - - -class InconsistentFormats(UnsupportedFormat): - """an OpenType subtable (%s) had inconsistent formats between masters""" - - -class VarLibCFFMergeError(VarLibError): - pass - - -class VarLibCFFDictMergeError(VarLibCFFMergeError): - """Raised when a CFF PrivateDict cannot be merged.""" - - def __init__(self, key, value, values): - error_msg = ( - f"For the Private Dict key '{key}', the default font value list:" - f"\n\t{value}\nhad a different number of values than a region font:" - ) - for region_value in values: - error_msg += f"\n\t{region_value}" - self.args = (error_msg,) - - -class VarLibCFFPointTypeMergeError(VarLibCFFMergeError): - """Raised when a CFF glyph cannot be merged because of point type 
differences.""" - - def __init__(self, point_type, pt_index, m_index, default_type, glyph_name): - error_msg = ( - f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in " - f"master index {m_index} differs from the default font point type " - f"'{default_type}'" - ) - self.args = (error_msg,) - - -class VarLibCFFHintTypeMergeError(VarLibCFFMergeError): - """Raised when a CFF glyph cannot be merged because of hint type differences.""" - - def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name): - error_msg = ( - f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in " - f"master index {m_index} differs from the default font hint type " - f"'{default_type}'" - ) - self.args = (error_msg,) - - -class VariationModelError(VarLibError): - """Raised when a variation model is faulty.""" diff --git a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/randomUuid.ts b/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/randomUuid.ts deleted file mode 100644 index 9d536365c57659305ad28d6fc06b89d77ab337ab..0000000000000000000000000000000000000000 --- a/spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/randomUuid.ts +++ /dev/null @@ -1,14 +0,0 @@ -type UUID = ReturnType; - -export function randomUUID(): UUID { - // Only on old safari / ios - if (!("randomUUID" in crypto)) { - return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) => - ( - Number(c) ^ - (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4))) - ).toString(16) - ) as UUID; - } - return crypto.randomUUID(); -} diff --git a/spaces/Datasculptor/MusicGen/tests/modules/test_lstm.py b/spaces/Datasculptor/MusicGen/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/DebateLabKIT/deepa2-demo/README.md b/spaces/DebateLabKIT/deepa2-demo/README.md deleted file mode 100644 index 1da43e12200334c88c709bba5573542c24f35602..0000000000000000000000000000000000000000 --- a/spaces/DebateLabKIT/deepa2-demo/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: DeepA2 Demo -emoji: 🕵️‍♀️ -colorFrom: green -colorTo: yellow -sdk: streamlit -app_file: app.py -pinned: true ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. 
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/__init__.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/__init__.py deleted file mode 100644 index db8124b132f91216c0ded226f20ea3a046734728..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -# empty diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/dataType.h b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/dataType.h deleted file mode 100644 index a7821a395c1c03db137587b879b255846fb0ca16..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/dataType.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include -#include - -#include -#include -typedef Eigen::Matrix DETECTBOX; -typedef Eigen::Matrix DETECTBOXSS; -typedef Eigen::Matrix FEATURE; -typedef Eigen::Matrix FEATURESS; -//typedef std::vector FEATURESS; - -//Kalmanfilter -//typedef Eigen::Matrix KAL_FILTER; -typedef Eigen::Matrix KAL_MEAN; -typedef Eigen::Matrix KAL_COVA; -typedef Eigen::Matrix KAL_HMEAN; -typedef Eigen::Matrix KAL_HCOVA; -using KAL_DATA = std::pair; -using KAL_HDATA = std::pair; - -//main -using RESULT_DATA = std::pair; - -//tracker: -using TRACKER_DATA = std::pair; -using MATCH_DATA = std::pair; -typedef struct t { - std::vector matches; - std::vector unmatched_tracks; - std::vector unmatched_detections; -}TRACHER_MATCHD; - -//linear_assignment: -typedef Eigen::Matrix DYNAMICM; \ No newline at end of file diff --git a/spaces/ECCV2022/bytetrack/yolox/core/__init__.py b/spaces/ECCV2022/bytetrack/yolox/core/__init__.py deleted file mode 100644 index c2379c704ec6320066cbb45a6b8dacca548662a0..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/core/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. 
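# [Illustrative sketch added for clarity; not part of the deleted files] The
# Eigen typedefs in dataType.h above fix the matrix shapes used by a
# SORT-style Kalman filter: a detection box, a state mean/covariance pair in
# both state space and measurement space, and an appearance feature vector.
# The concrete dimensions below are an assumption based on common
# ByteTrack/DeepSORT practice (the template parameters were stripped in this
# dump), shown as a rough NumPy equivalent:
import numpy as np

DETECTBOX = np.zeros((1, 4), dtype=np.float32)   # measurement: box parameters
FEATURE = np.zeros((1, 256), dtype=np.float32)   # appearance embedding
KAL_MEAN = np.zeros((1, 8), dtype=np.float32)    # state mean (position + velocity terms)
KAL_COVA = np.eye(8, dtype=np.float32)           # state covariance
KAL_HMEAN = np.zeros((1, 4), dtype=np.float32)   # mean projected to measurement space
KAL_HCOVA = np.eye(4, dtype=np.float32)          # projected covariance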
- -from .launch import launch -from .trainer import Trainer diff --git a/spaces/EPFL-VILAB/MultiMAE/utils/masking_generator.py b/spaces/EPFL-VILAB/MultiMAE/utils/masking_generator.py deleted file mode 100644 index 5603eb30b40e6fea64f23d1f406f47041cc000fc..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/utils/masking_generator.py +++ /dev/null @@ -1,33 +0,0 @@ -# -------------------------------------------------------- -# Based on BEiT, timm, DINO and DeiT code bases -# https://github.com/microsoft/unilm/tree/master/beit -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/facebookresearch/deit -# https://github.com/facebookresearch/dino -# -------------------------------------------------------- -import numpy as np - - -class RandomMaskingGenerator: - def __init__(self, input_size, mask_ratio): - if not isinstance(input_size, tuple): - input_size = (input_size,) * 2 - - self.height, self.width = input_size - - self.num_patches = self.height * self.width - self.num_mask = int(mask_ratio * self.num_patches) - - def __repr__(self): - repr_str = "Maks: total patches {}, mask patches {}".format( - self.num_patches, self.num_mask - ) - return repr_str - - def __call__(self): - mask = np.hstack([ - np.zeros(self.num_patches - self.num_mask), - np.ones(self.num_mask), - ]) - np.random.shuffle(mask) - return mask # [196] diff --git a/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py b/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py deleted file mode 100644 index 73a5b836177b706c306e27875f8391c1aed4b948..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Epoching/3D_Photo_Inpainting/MiDaS/monodepth_net.py b/spaces/Epoching/3D_Photo_Inpainting/MiDaS/monodepth_net.py deleted file mode 100644 index 461db0807deaa98b98e4b5447d0a24b830ab7dbf..0000000000000000000000000000000000000000 --- a/spaces/Epoching/3D_Photo_Inpainting/MiDaS/monodepth_net.py +++ /dev/null @@ -1,186 +0,0 @@ -"""MonoDepthNet: Network for monocular depth estimation trained by mixing several datasets. 
-This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn -from torchvision import models - - -class MonoDepthNet(nn.Module): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=256): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - """ - super().__init__() - - resnet = models.resnet50(pretrained=False) - - self.pretrained = nn.Module() - self.scratch = nn.Module() - self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, - resnet.maxpool, resnet.layer1) - - self.pretrained.layer2 = resnet.layer2 - self.pretrained.layer3 = resnet.layer3 - self.pretrained.layer4 = resnet.layer4 - - # adjust channel number of feature maps - self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False) - self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False) - self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False) - self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False) - - self.scratch.refinenet4 = FeatureFusionBlock(features) - self.scratch.refinenet3 = FeatureFusionBlock(features) - self.scratch.refinenet2 = FeatureFusionBlock(features) - self.scratch.refinenet1 = FeatureFusionBlock(features) - - # adaptive output module: 2 convolutions and upsampling - self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), - nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode='bilinear')) - - # load model - if path: - self.load(path) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return out - - def load(self, path): - """Load model from file. - - Args: - path (str): file path - """ - parameters = torch.load(path) - - self.load_state_dict(parameters) - - -class Interpolate(nn.Module): - """Interpolation module. - """ - - def __init__(self, scale_factor, mode): - """Init. - - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.scale_factor = scale_factor - self.mode = mode - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: interpolated data - """ - x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False) - - return x - - -class ResidualConvUnit(nn.Module): - """Residual convolution module. - """ - - def __init__(self, features): - """Init. 
- - Args: - features (int): number of features - """ - super().__init__() - - self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) - self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=False) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - out = self.relu(x) - out = self.conv1(out) - out = self.relu(out) - out = self.conv2(out) - - return out + x - - -class FeatureFusionBlock(nn.Module): - """Feature fusion block. - """ - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super().__init__() - - self.resConfUnit = ResidualConvUnit(features) - - def forward(self, *xs): - """Forward pass. - - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - output += self.resConfUnit(xs[1]) - - output = self.resConfUnit(output) - output = nn.functional.interpolate(output, scale_factor=2, - mode='bilinear', align_corners=True) - - return output diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/model_param_init.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/model_param_init.py deleted file mode 100644 index b995c0bfb1194746187692e2ab1c2a6dbaaaec6c..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/model_param_init.py +++ /dev/null @@ -1,69 +0,0 @@ -import json -import os -import pathlib - -default_param = {} -default_param["bins"] = 768 -default_param["unstable_bins"] = 9 # training only -default_param["reduction_bins"] = 762 # training only -default_param["sr"] = 44100 -default_param["pre_filter_start"] = 757 -default_param["pre_filter_stop"] = 768 -default_param["band"] = {} - - -default_param["band"][1] = { - "sr": 11025, - "hl": 128, - "n_fft": 960, - "crop_start": 0, - "crop_stop": 245, - "lpf_start": 61, # inference only - "res_type": "polyphase", -} - -default_param["band"][2] = { - "sr": 44100, - "hl": 512, - "n_fft": 1536, - "crop_start": 24, - "crop_stop": 547, - "hpf_start": 81, # inference only - "res_type": "sinc_best", -} - - -def int_keys(d): - r = {} - for k, v in d: - if k.isdigit(): - k = int(k) - r[k] = v - return r - - -class ModelParameters(object): - def __init__(self, config_path=""): - if ".pth" == pathlib.Path(config_path).suffix: - import zipfile - - with zipfile.ZipFile(config_path, "r") as zip: - self.param = json.loads( - zip.read("param.json"), object_pairs_hook=int_keys - ) - elif ".json" == pathlib.Path(config_path).suffix: - with open(config_path, "r") as f: - self.param = json.loads(f.read(), object_pairs_hook=int_keys) - else: - self.param = default_param - - for k in [ - "mid_side", - "mid_side_b", - "mid_side_b2", - "stereo_w", - "stereo_n", - "reverse", - ]: - if not k in self.param: - self.param[k] = False diff --git a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/dataset.py b/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . 
import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = 
np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/Gasi/White-box-Cartoonization/README.md b/spaces/Gasi/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/Gasi/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/ball_in_bowl_obstacle_course.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/ball_in_bowl_obstacle_course.py deleted file mode 100644 index 2349767e3e8e7682c198175b949be9a3e8762ca8..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/ball_in_bowl_obstacle_course.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils -import pybullet as p - -class BallInBowlObstacleCourse(Task): - """Navigate through a maze of blocks, pick up balls of different colors and place them in the corresponding colored bowls.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "navigate through the maze and place the {color} ball in the {color} bowl" - self.task_completed_desc = "done placing balls in bowls." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add blocks to form a maze. - block_size = (0.04, 0.04, 0.04) - block_urdf = 'block/small.urdf' - for _ in range(10): - block_pose = self.get_random_pose(env, block_size) - env.add_object(block_urdf, block_pose, category='fixed') - - # Add balls of different colors. 
- ball_size = (0.04, 0.04, 0.04) - ball_urdf = 'ball/ball-template.urdf' - colors = ['red', 'blue', 'green', 'yellow'] - balls = [] - for color in colors: - ball_pose = self.get_random_pose(env, ball_size) - ball_id = env.add_object(ball_urdf, ball_pose, color=color) - balls.append(ball_id) - - # Add bowls of different colors at different corners of the maze. - bowl_size = (0.12, 0.12, 0) - bowl_urdf = 'bowl/bowl.urdf' - bowls = [] - for color in colors: - bowl_pose = self.get_random_pose(env, bowl_size) - bowl_id = env.add_object(bowl_urdf, bowl_pose, color=color, category='fixed') - bowls.append(bowl_id) - - # Goal: each ball is in the bowl of the same color. - for i in range(len(balls)): - self.add_goal(objs=[balls[i]], matches=np.ones((1, 1)), targ_poses=[p.getBasePositionAndOrientation(bowls[i])], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1/len(balls), - language_goal=self.lang_template.format(color=colors[i])) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_blocks_in_cylinder_maze.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_blocks_in_cylinder_maze.py deleted file mode 100644 index 11386702fb28ed769345f6aac6b5d18f4a5a4f58..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_blocks_in_cylinder_maze.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class ColorBlocksInCylinderMaze(Task): - """Pick up five differently colored blocks (red, blue, yellow, green, and orange) that are scattered randomly on the table top. Arrange three cylindrical containers in a row to create a maze-like structure. Place the red, yellow, and blue block into the first, second, and third cylinder from left respectively. Then, stack the green and orange block on top of any container, followed by placing the same color palette on the respective block.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "arrange the blocks in the cylinders and stack the green and orange blocks" - self.task_completed_desc = "done arranging blocks in cylinders." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add cylinders. - cylinder_size = (0.05, 0.05, 0.1) - cylinder_urdf = 'cylinder/cylinder-template.urdf' - cylinder_poses = [] - for _ in range(3): - cylinder_pose = self.get_random_pose(env, cylinder_size) - env.add_object(cylinder_urdf, cylinder_pose, 'fixed') - cylinder_poses.append(cylinder_pose) - - # Add blocks. - block_size = (0.04, 0.04, 0.04) - block_urdf = 'block/block.urdf' - block_colors = [utils.COLORS['red'], utils.COLORS['blue'], utils.COLORS['yellow'], utils.COLORS['green'], utils.COLORS['orange']] - blocks = [] - for i in range(5): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=block_colors[i]) - blocks.append(block_id) - - # Goal: red, yellow, and blue blocks are in the first, second, and third cylinder respectively. 
- self.add_goal(objs=blocks[:3], matches=np.ones((3, 3)), targ_poses=cylinder_poses, replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 2, language_goal=self.lang_template) - - # Goal: green and orange blocks are stacked on top of any cylinder. - self.add_goal(objs=blocks[3:], matches=np.ones((2, 3)), targ_poses=cylinder_poses, replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 2, language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_structured_block_tower.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_structured_block_tower.py deleted file mode 100644 index b50c77f617836ab751f8e9d969853fef929dca12..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_structured_block_tower.py +++ /dev/null @@ -1,52 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class ColorStructuredBlockTower(Task): - """Construct a tower using six blocks: two red, two blue, and two green. - The tower should be built in the order of a red block at the base, - followed by a blue, then green, then red, blue and green at the top.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "construct a tower using six blocks: two red, two blue, and two green. " \ - "The tower should be built in the order of a red block at the base, " \ - "followed by a blue, then green, then red, blue and green at the top." - self.task_completed_desc = "done building color-structured block tower." 
- self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Define block colors and sizes - colors = [utils.COLORS['red'], utils.COLORS['blue'], utils.COLORS['green']] * 2 - block_size = (0.04, 0.04, 0.04) - - # Add blocks - block_urdf = 'block/block.urdf' - blocks = [] - for i in range(6): - block_pose = self.get_random_pose(env, block_size) - block_id = env.add_object(block_urdf, block_pose, color=colors[i]) - blocks.append(block_id) - - # Define target poses for the blocks in the tower - base_pose = self.get_random_pose(env, block_size) - targ_poses = [base_pose] - for i in range(1, 6): - targ_poses.append((np.array(base_pose[0]) + np.array([0, 0, i * block_size[2]]), base_pose[1])) - - # Add goals - for i in range(6): - self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[targ_poses[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1/6, symmetries=[np.pi/2], - language_goal=self.lang_template) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/corner_sort_cylinders.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/corner_sort_cylinders.py deleted file mode 100644 index c0d843727d55b757e5b8fe38e176c9c517456820..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/corner_sort_cylinders.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class CornerSortCylinders(Task): - """Pick up cylinders of four different colors (red, blue, green, yellow) and place them into four corners accordingly marked on the tabletop.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "place the {color} cylinder in the {color} corner" - self.task_completed_desc = "done sorting cylinders." 
- self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Define colors - colors = ['red', 'blue', 'green', 'yellow'] - - # Add corners - corner_size = (0.04, 0.04, 0.04) # x, y, z dimensions for the asset size - corner_template = 'corner/corner-template.urdf' - corner_poses = [] - for color in colors: - replace = {'DIM': corner_size, 'HALF': (corner_size[0] / 2, corner_size[1] / 2, corner_size[2] / 2), 'COLOR': utils.COLORS[color]} - corner_urdf = self.fill_template(corner_template, replace) - corner_pose = self.get_random_pose(env, corner_size) - env.add_object(corner_urdf, corner_pose, 'fixed') - corner_poses.append(corner_pose) - - # Add cylinders - cylinder_size = (0.02, 0.02, 0.06) # x, y, z dimensions for the asset size - cylinder_template = 'cylinder/cylinder-template.urdf' - cylinders = [] - for color in colors: - replace = {'DIM': cylinder_size, 'HALF': (cylinder_size[0] / 2, cylinder_size[1] / 2, cylinder_size[2] / 2), 'COLOR': utils.COLORS[color]} - cylinder_urdf = self.fill_template(cylinder_template, replace) - cylinder_pose = self.get_random_pose(env, cylinder_size) - cylinder_id = env.add_object(cylinder_urdf, cylinder_pose) - cylinders.append(cylinder_id) - - # Add goals - for i in range(len(cylinders)): - self.add_goal(objs=[cylinders[i]], matches=np.int32([[1]]), targ_poses=[corner_poses[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / len(cylinders), - language_goal=self.lang_template.format(color=colors[i])) \ No newline at end of file diff --git a/spaces/GeorgeOrville/bingo/src/components/ui/codeblock.tsx b/spaces/GeorgeOrville/bingo/src/components/ui/codeblock.tsx deleted file mode 100644 index aabda4e3b59f4e36b6ab79feb19d8d18b70e881b..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/src/components/ui/codeblock.tsx +++ /dev/null @@ -1,142 +0,0 @@ -'use client' - -import { FC, memo } from 'react' -import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter' -import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism' - -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' -import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons' -import { Button } from '@/components/ui/button' - -interface Props { - language: string - value: string -} - -interface languageMap { - [key: string]: string | undefined -} - -export const programmingLanguages: languageMap = { - javascript: '.js', - python: '.py', - java: '.java', - c: '.c', - cpp: '.cpp', - 'c++': '.cpp', - 'c#': '.cs', - ruby: '.rb', - php: '.php', - swift: '.swift', - 'objective-c': '.m', - kotlin: '.kt', - typescript: '.ts', - go: '.go', - perl: '.pl', - rust: '.rs', - scala: '.scala', - haskell: '.hs', - lua: '.lua', - shell: '.sh', - sql: '.sql', - html: '.html', - css: '.css' - // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component -} - -export const generateRandomString = (length: number, lowercase = false) => { - const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0 - let result = '' - for (let i = 0; i < length; i++) { - result += chars.charAt(Math.floor(Math.random() * chars.length)) - } - return lowercase ? 
result.toLowerCase() : result -} - -const CodeBlock: FC = memo(({ language, value }) => { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - - const downloadAsFile = () => { - if (typeof window === 'undefined') { - return - } - const fileExtension = programmingLanguages[language] || '.file' - const suggestedFileName = `file-${generateRandomString( - 3, - true - )}${fileExtension}` - const fileName = window.prompt('Enter file name' || '', suggestedFileName) - - if (!fileName) { - // User pressed cancel on prompt. - return - } - - const blob = new Blob([value], { type: 'text/plain' }) - const url = URL.createObjectURL(blob) - const link = document.createElement('a') - link.download = fileName - link.href = url - link.style.display = 'none' - document.body.appendChild(link) - link.click() - document.body.removeChild(link) - URL.revokeObjectURL(url) - } - - const onCopy = () => { - if (isCopied) return - copyToClipboard(value) - } - - return ( -
    -
    - {language} -
    - - -
    -
    - - {value} - -
    - ) -}) -CodeBlock.displayName = 'CodeBlock' - -export { CodeBlock } diff --git a/spaces/Gladiator/Sartorius-Cell-Segmentation/app.py b/spaces/Gladiator/Sartorius-Cell-Segmentation/app.py deleted file mode 100644 index 1cbab0be85e1242df43bbb394cb0674daa301e3d..0000000000000000000000000000000000000000 --- a/spaces/Gladiator/Sartorius-Cell-Segmentation/app.py +++ /dev/null @@ -1,62 +0,0 @@ -import streamlit as st -import numpy as np -import matplotlib.pyplot as plt -from PIL import Image -from cellpose import models - - -@st.cache() -def load_model(model_path): - inf_model = models.CellposeModel(gpu=False, pretrained_model=model_path) - return inf_model - -#def inference(img, model_path, **model_params): -# inf_model = models.CellposeModel(gpu=False, pretrained_model=model_path) -# preds, flows, _ = inf_model.eval([img], **model_params) -# return preds, flows - - -if __name__ == "__main__": - - st.title("Sartorius Neuronal Cell Segmentation") - model_path = "./cellpose_residual_on_style_on_concatenation_off_fold1_ep_649_cv_0.2834" - inf_model = models.CellposeModel(gpu=False, pretrained_model=model_path) - uploaded_img = st.file_uploader(label="Upload neuronal cell image") - - with st.expander("View input image"): - if uploaded_img is not None: - st.image(uploaded_img) - else: - st.warning("Please upload an image") - - segment = st.button("Perform segmentation") - - if uploaded_img is not None and segment: - img = Image.open(uploaded_img) - img = np.array(img) - - model_params = { - "diameter": 19.0, - "channels": [0, 0], - "augment": True, - "resample": True, - } - with st.spinner("Performing segmentation. This might take a while..."): - #preds, flows = inference( - # img=img, - # model_path="./cellpose_residual_on_style_on_concatenation_off_fold1_ep_649_cv_0.2834", - # **model_params - #) - preds, flows, _ = inf_model.eval([img], **model_params) - - fig, (ax1, ax2, ax3) = plt.subplots(1, 3) - ax1.axis("off") - ax2.axis("off") - ax3.axis("off") - ax1.set_title("Original Image") - ax1.imshow(img, cmap="gray") - ax2.set_title("Segmented image") - ax2.imshow(preds[0]) - ax3.set_title("Image flows") - ax3.imshow(flows[0][0]) - st.pyplot(fig) diff --git a/spaces/Godrose0728/Aisound02/text/english.py b/spaces/Godrose0728/Aisound02/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/Godrose0728/Aisound02/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). 
-''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - 
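# [Illustrative sketch added for clarity; not part of the deleted file] The
# regex helpers above are applied in a fixed order by normalize_numbers:
# commas inside numbers are stripped first, then pounds, dollars, decimals
# and ordinals are expanded, and only the remaining bare integers go through
# _expand_number. A minimal driver exercising that pipeline (it assumes this
# module's normalize_numbers and its `inflect` dependency are importable):
def _demo_normalize_numbers():
    samples = ["$3.50", "the 2nd try", "born in 1998", "1,000 items"]
    for sample in samples:
        print(sample, "->", normalize_numbers(sample))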
- -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/fonts_opt.py b/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/fonts_opt.py deleted file mode 100644 index ccea77ad00c644cedf38ec0336dcf124a8b6448d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/fonts_opt.py +++ /dev/null @@ -1,69 +0,0 @@ -# font management -# author: Zeng Yifu(曾逸夫) -# creation time: 2022-05-01 -# email: zyfiy1314@163.com -# project homepage: https://gitee.com/CV_Lab/gradio_yolov5_det - -import os -import sys -from pathlib import Path - -import wget -from rich.console import Console - -ROOT_PATH = sys.path[0] # Project root directory - -# Chinese, English, Russian, Spanish, Arabic, Korean -fonts_list = ["SimSun.ttf", "TimesNewRoman.ttf", "malgun.ttf"] # font list -fonts_suffix = ["ttc", "ttf", "otf"] # font suffix - -data_url_dict = { - "SimSun.ttf": "https://gitee.com/CV_Lab/gradio_yolov5_det/attach_files/1053539/download/SimSun.ttf", - "TimesNewRoman.ttf": "https://gitee.com/CV_Lab/gradio_yolov5_det/attach_files/1053537/download/TimesNewRoman.ttf", - "malgun.ttf": "https://gitee.com/CV_Lab/gradio_yolov5_det/attach_files/1053538/download/malgun.ttf",} - -console = Console() - - -# create font library -def add_fronts(font_diff): - - global font_name - - for k, v in data_url_dict.items(): - if k in font_diff: - font_name = v.split("/")[-1] # font name - Path(f"{ROOT_PATH}/fonts").mkdir(parents=True, exist_ok=True) # Create a directory - - file_path = f"{ROOT_PATH}/fonts/{font_name}" # font path - - try: - # Download font file - wget.download(v, file_path) - except Exception as e: - print("Path error! 
Program ended!") - print(e) - sys.exit() - else: - print() - console.print(f"{font_name} [bold green]font file download complete![/bold green] has been saved to: {file_path}") - - -# Determine the font file -def is_fonts(fonts_dir): - if os.path.isdir(fonts_dir): - # if the font library exists - f_list = os.listdir(fonts_dir) # local font library - - font_diff = list(set(fonts_list).difference(set(f_list))) - - if font_diff != []: - # font does not exist - console.print("[bold red] font does not exist, loading...[/bold red]") - add_fronts(font_diff) # Create a font library - else: - console.print(f"{fonts_list}[bold green]font already exists![/bold green]") - else: - # The font library does not exist, create a font library - console.print("[bold red]font library does not exist, creating...[/bold red]") - add_fronts(fonts_list) # Create a font library \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py deleted file mode 100644 index de4a8a5e9f030f1e8a8802596885186163f23eed..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py' -model = dict( - pretrained='open-mmlab://resnet101_v1c', - backbone=dict( - depth=101, - dilations=(1, 1, 1, 2), - strides=(1, 2, 2, 1), - multi_grid=(1, 2, 4)), - decode_head=dict( - dilations=(1, 6, 12, 18), - sampler=dict(type='OHEMPixelSampler', min_kept=100000))) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/audio_dataset.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/audio_dataset.py deleted file mode 100644 index 9d7442526186b3712f5d4754f928a40ecd964174..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/audio_dataset.py +++ /dev/null @@ -1,587 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""AudioDataset support. In order to handle a larger number of files -without having to scan again the folders, we precompute some metadata -(filename, sample rate, duration), and use that to efficiently sample audio segments. 
-""" -import argparse -import copy -from concurrent.futures import ThreadPoolExecutor, Future -from dataclasses import dataclass, fields -from contextlib import ExitStack -from functools import lru_cache -import gzip -import json -import logging -import os -from pathlib import Path -import random -import sys -import typing as tp - -import torch -import torch.nn.functional as F - -from .audio import audio_read, audio_info -from .audio_utils import convert_audio -from .zip import PathInZip - -try: - import dora -except ImportError: - dora = None # type: ignore - - -@dataclass(order=True) -class BaseInfo: - - @classmethod - def _dict2fields(cls, dictionary: dict): - return { - field.name: dictionary[field.name] - for field in fields(cls) if field.name in dictionary - } - - @classmethod - def from_dict(cls, dictionary: dict): - _dictionary = cls._dict2fields(dictionary) - return cls(**_dictionary) - - def to_dict(self): - return { - field.name: self.__getattribute__(field.name) - for field in fields(self) - } - - -@dataclass(order=True) -class AudioMeta(BaseInfo): - path: str - duration: float - sample_rate: int - amplitude: tp.Optional[float] = None - weight: tp.Optional[float] = None - # info_path is used to load additional information about the audio file that is stored in zip files. - info_path: tp.Optional[PathInZip] = None - - @classmethod - def from_dict(cls, dictionary: dict): - base = cls._dict2fields(dictionary) - if 'info_path' in base and base['info_path'] is not None: - base['info_path'] = PathInZip(base['info_path']) - return cls(**base) - - def to_dict(self): - d = super().to_dict() - if d['info_path'] is not None: - d['info_path'] = str(d['info_path']) - return d - - -@dataclass(order=True) -class SegmentInfo(BaseInfo): - meta: AudioMeta - seek_time: float - # The following values are given once the audio is processed, e.g. - # at the target sample rate and target number of channels. - n_frames: int # actual number of frames without padding - total_frames: int # total number of frames, padding included - sample_rate: int # actual sample rate - channels: int # number of audio channels. - - -DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a'] - -logger = logging.getLogger(__name__) - - -def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta: - """AudioMeta from a path to an audio file. - - Args: - file_path (str): Resolved path of valid audio file. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - Returns: - AudioMeta: Audio file path and its metadata. - """ - info = audio_info(file_path) - amplitude: tp.Optional[float] = None - if not minimal: - wav, sr = audio_read(file_path) - amplitude = wav.abs().max().item() - return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) - - -def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta: - """If Dora is available as a dependency, try to resolve potential relative paths - in list of AudioMeta. This method is expected to be used when loading meta from file. - - Args: - m (AudioMeta): Audio meta to resolve. - fast (bool): If True, uses a really fast check for determining if a file - is already absolute or not. Only valid on Linux/Mac. - Returns: - AudioMeta: Audio meta with resolved path. 
- """ - def is_abs(m): - if fast: - return str(m)[0] == '/' - else: - os.path.isabs(str(m)) - - if not dora: - return m - - if not is_abs(m.path): - m.path = dora.git_save.to_absolute_path(m.path) - if m.info_path is not None and not is_abs(m.info_path.zip_path): - m.info_path.zip_path = dora.git_save.to_absolute_path(m.path) - return m - - -def find_audio_files(path: tp.Union[Path, str], - exts: tp.List[str] = DEFAULT_EXTS, - resolve: bool = True, - minimal: bool = True, - progress: bool = False, - workers: int = 0) -> tp.List[AudioMeta]: - """Build a list of AudioMeta from a given path, - collecting relevant audio files and fetching meta info. - - Args: - path (str or Path): Path to folder containing audio files. - exts (list of str): List of file extensions to consider for audio files. - minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). - progress (bool): Whether to log progress on audio files collection. - workers (int): number of parallel workers, if 0, use only the current thread. - Returns: - list of AudioMeta: List of audio file path and its metadata. - """ - audio_files = [] - futures: tp.List[Future] = [] - pool: tp.Optional[ThreadPoolExecutor] = None - with ExitStack() as stack: - if workers > 0: - pool = ThreadPoolExecutor(workers) - stack.enter_context(pool) - - if progress: - print("Finding audio files...") - for root, folders, files in os.walk(path, followlinks=True): - for file in files: - full_path = Path(root) / file - if full_path.suffix.lower() in exts: - audio_files.append(full_path) - if pool is not None: - futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal)) - if progress: - print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr) - - if progress: - print("Getting audio metadata...") - meta: tp.List[AudioMeta] = [] - for idx, file_path in enumerate(audio_files): - try: - if pool is None: - m = _get_audio_meta(str(file_path), minimal) - else: - m = futures[idx].result() - if resolve: - m = _resolve_audio_meta(m) - except Exception as err: - print("Error with", str(file_path), err, file=sys.stderr) - continue - meta.append(m) - if progress: - print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr) - meta.sort() - return meta - - -def load_audio_meta(path: tp.Union[str, Path], - resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]: - """Load list of AudioMeta from an optionally compressed json file. - - Args: - path (str or Path): Path to JSON file. - resolve (bool): Whether to resolve the path from AudioMeta (default=True). - fast (bool): activates some tricks to make things faster. - Returns: - list of AudioMeta: List of audio file path and its total duration. - """ - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'rb') as fp: # type: ignore - lines = fp.readlines() - meta = [] - for line in lines: - d = json.loads(line) - m = AudioMeta.from_dict(d) - if resolve: - m = _resolve_audio_meta(m, fast=fast) - meta.append(m) - return meta - - -def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]): - """Save the audio metadata to the file pointer as json. - - Args: - path (str or Path): Path to JSON file. - metadata (list of BaseAudioMeta): List of audio meta to save. 
- """ - Path(path).parent.mkdir(exist_ok=True, parents=True) - open_fn = gzip.open if str(path).lower().endswith('.gz') else open - with open_fn(path, 'wb') as fp: # type: ignore - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - json_bytes = json_str.encode('utf-8') - fp.write(json_bytes) - - -class AudioDataset: - """Base audio dataset. - - The dataset takes a list of AudioMeta and create a dataset composed of segments of audio - and potentially additional information, by creating random segments from the list of audio - files referenced in the metadata and applying minimal data pre-processing such as resampling, - mixing of channels, padding, etc. - - If no segment_duration value is provided, the AudioDataset will return the full wav for each - audio file. Otherwise, it will randomly sample audio files and create a segment of the specified - duration, applying padding if required. - - By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True - allows to return a tuple containing the torch Tensor and additional metadata on the segment and the - original audio meta. - - Note that you can call `start_epoch(epoch)` in order to get - a deterministic "randomization" for `shuffle=True`. - For a given epoch and dataset index, this will always return the same extract. - You can get back some diversity by setting the `shuffle_seed` param. - - Args: - meta (list of AudioMeta): List of audio files metadata. - segment_duration (float, optional): Optional segment duration of audio to load. - If not specified, the dataset will load the full audio segment from the file. - shuffle (bool): Set to `True` to have the data reshuffled at every epoch. - sample_rate (int): Target sample rate of the loaded audio samples. - channels (int): Target number of channels of the loaded audio samples. - sample_on_duration (bool): Set to `True` to sample segments with probability - dependent on audio file duration. This is only used if `segment_duration` is provided. - sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of - `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product - of the file duration and file weight. This is only used if `segment_duration` is provided. - min_segment_ratio (float): Minimum segment ratio to use when the audio file - is shorter than the desired segment. - max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset. - return_info (bool): Whether to return the wav only or return wav along with segment info and metadata. - min_audio_duration (float, optional): Minimum audio file duration, in seconds, if provided - audio shorter than this will be filtered out. - max_audio_duration (float, optional): Maximal audio file duration in seconds, if provided - audio longer than this will be filtered out. - shuffle_seed (int): can be used to further randomize - load_wav (bool): if False, skip loading the wav but returns a tensor of 0 - with the expected segment_duration (which must be provided if load_wav is False). - permutation_on_files (bool): only if `sample_on_weight` and `sample_on_duration` - are False. Will ensure a permutation on files when going through the dataset. - In that case the epoch number must be provided in order for the model - to continue the permutation across epochs. 
In that case, it is assumed - that `num_samples = total_batch_size * num_updates_per_epoch`, with - `total_batch_size` the overall batch size accounting for all gpus. - """ - def __init__(self, - meta: tp.List[AudioMeta], - segment_duration: tp.Optional[float] = None, - shuffle: bool = True, - num_samples: int = 10_000, - sample_rate: int = 48_000, - channels: int = 2, - pad: bool = True, - sample_on_duration: bool = True, - sample_on_weight: bool = True, - min_segment_ratio: float = 0.5, - max_read_retry: int = 10, - return_info: bool = False, - min_audio_duration: tp.Optional[float] = None, - max_audio_duration: tp.Optional[float] = None, - shuffle_seed: int = 0, - load_wav: bool = True, - permutation_on_files: bool = False, - ): - assert len(meta) > 0, "No audio meta provided to AudioDataset. Please check loading of audio meta." - assert segment_duration is None or segment_duration > 0 - assert segment_duration is None or min_segment_ratio >= 0 - self.segment_duration = segment_duration - self.min_segment_ratio = min_segment_ratio - self.max_audio_duration = max_audio_duration - self.min_audio_duration = min_audio_duration - if self.min_audio_duration is not None and self.max_audio_duration is not None: - assert self.min_audio_duration <= self.max_audio_duration - self.meta: tp.List[AudioMeta] = self._filter_duration(meta) - assert len(self.meta) # Fail fast if all data has been filtered. - self.total_duration = sum(d.duration for d in self.meta) - - if segment_duration is None: - num_samples = len(self.meta) - self.num_samples = num_samples - self.shuffle = shuffle - self.sample_rate = sample_rate - self.channels = channels - self.pad = pad - self.sample_on_weight = sample_on_weight - self.sample_on_duration = sample_on_duration - self.sampling_probabilities = self._get_sampling_probabilities() - self.max_read_retry = max_read_retry - self.return_info = return_info - self.shuffle_seed = shuffle_seed - self.current_epoch: tp.Optional[int] = None - self.load_wav = load_wav - if not load_wav: - assert segment_duration is not None - self.permutation_on_files = permutation_on_files - if permutation_on_files: - assert not self.sample_on_duration - assert not self.sample_on_weight - assert self.shuffle - - def start_epoch(self, epoch: int): - self.current_epoch = epoch - - def __len__(self): - return self.num_samples - - def _get_sampling_probabilities(self, normalized: bool = True): - """Return the sampling probabilities for each file inside `self.meta`.""" - scores: tp.List[float] = [] - for file_meta in self.meta: - score = 1. - if self.sample_on_weight and file_meta.weight is not None: - score *= file_meta.weight - if self.sample_on_duration: - score *= file_meta.duration - scores.append(score) - probabilities = torch.tensor(scores) - if normalized: - probabilities /= probabilities.sum() - return probabilities - - @staticmethod - @lru_cache(16) - def _get_file_permutation(num_files: int, permutation_index: int, base_seed: int): - # Used to keep the most recent files permutation in memory implicitely. - # will work unless someone is using a lot of Datasets in parallel. - rng = torch.Generator() - rng.manual_seed(base_seed + permutation_index) - return torch.randperm(num_files, generator=rng) - - def sample_file(self, index: int, rng: torch.Generator) -> AudioMeta: - """Sample a given file from `self.meta`. Can be overridden in subclasses. - This is only called if `segment_duration` is not None. - - You must use the provided random number generator `rng` for reproducibility. 
- You can further make use of the index accessed. - """ - if self.permutation_on_files: - assert self.current_epoch is not None - total_index = self.current_epoch * len(self) + index - permutation_index = total_index // len(self.meta) - relative_index = total_index % len(self.meta) - permutation = AudioDataset._get_file_permutation( - len(self.meta), permutation_index, self.shuffle_seed) - file_index = permutation[relative_index] - return self.meta[file_index] - - if not self.sample_on_weight and not self.sample_on_duration: - file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item()) - else: - file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item()) - - return self.meta[file_index] - - def _audio_read(self, path: str, seek_time: float = 0, duration: float = -1): - # Override this method in subclass if needed. - if self.load_wav: - return audio_read(path, seek_time, duration, pad=False) - else: - assert self.segment_duration is not None - n_frames = int(self.sample_rate * self.segment_duration) - return torch.zeros(self.channels, n_frames), self.sample_rate - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]: - if self.segment_duration is None: - file_meta = self.meta[index] - out, sr = audio_read(file_meta.path) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames, - sample_rate=self.sample_rate, channels=out.shape[0]) - else: - rng = torch.Generator() - if self.shuffle: - # We use index, plus extra randomness, either totally random if we don't know the epoch. - # otherwise we make use of the epoch number and optional shuffle_seed. - if self.current_epoch is None: - rng.manual_seed(index + self.num_samples * random.randint(0, 2**24)) - else: - rng.manual_seed(index + self.num_samples * (self.current_epoch + self.shuffle_seed)) - else: - # We only use index - rng.manual_seed(index) - - for retry in range(self.max_read_retry): - file_meta = self.sample_file(index, rng) - # We add some variance in the file position even if audio file is smaller than segment - # without ending up with empty segments - max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio) - seek_time = torch.rand(1, generator=rng).item() * max_seek - try: - out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False) - out = convert_audio(out, sr, self.sample_rate, self.channels) - n_frames = out.shape[-1] - target_frames = int(self.segment_duration * self.sample_rate) - if self.pad: - out = F.pad(out, (0, target_frames - n_frames)) - segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames, - sample_rate=self.sample_rate, channels=out.shape[0]) - except Exception as exc: - logger.warning("Error opening file %s: %r", file_meta.path, exc) - if retry == self.max_read_retry - 1: - raise - else: - break - - if self.return_info: - # Returns the wav and additional information on the wave segment - return out, segment_info - else: - return out - - def collater(self, samples): - """The collater function has to be provided to the dataloader - if AudioDataset has return_info=True in order to properly collate - the samples of a batch. - """ - if self.segment_duration is None and len(samples) > 1: - assert self.pad, "Must allow padding when batching examples of different durations." 
- - # In this case the audio reaching the collater is of variable length as segment_duration=None. - to_pad = self.segment_duration is None and self.pad - if to_pad: - max_len = max([wav.shape[-1] for wav, _ in samples]) - - def _pad_wav(wav): - return F.pad(wav, (0, max_len - wav.shape[-1])) - - if self.return_info: - if len(samples) > 0: - assert len(samples[0]) == 2 - assert isinstance(samples[0][0], torch.Tensor) - assert isinstance(samples[0][1], SegmentInfo) - - wavs = [wav for wav, _ in samples] - segment_infos = [copy.deepcopy(info) for _, info in samples] - - if to_pad: - # Each wav could be of a different duration as they are not segmented. - for i in range(len(samples)): - # Determines the total length of the signal with padding, so we update here as we pad. - segment_infos[i].total_frames = max_len - wavs[i] = _pad_wav(wavs[i]) - - wav = torch.stack(wavs) - return wav, segment_infos - else: - assert isinstance(samples[0], torch.Tensor) - if to_pad: - samples = [_pad_wav(s) for s in samples] - return torch.stack(samples) - - def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Filters out audio files with audio durations that will not allow to sample examples from them.""" - orig_len = len(meta) - - # Filter data that is too short. - if self.min_audio_duration is not None: - meta = [m for m in meta if m.duration >= self.min_audio_duration] - - # Filter data that is too long. - if self.max_audio_duration is not None: - meta = [m for m in meta if m.duration <= self.max_audio_duration] - - filtered_len = len(meta) - removed_percentage = 100*(1-float(filtered_len)/orig_len) - msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage - if removed_percentage < 10: - logging.debug(msg) - else: - logging.warning(msg) - return meta - - @classmethod - def from_meta(cls, root: tp.Union[str, Path], **kwargs): - """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file. - - Args: - root (str or Path): Path to root folder containing audio files. - kwargs: Additional keyword arguments for the AudioDataset. - """ - root = Path(root) - if root.is_dir(): - if (root / 'data.jsonl').exists(): - root = root / 'data.jsonl' - elif (root / 'data.jsonl.gz').exists(): - root = root / 'data.jsonl.gz' - else: - raise ValueError("Don't know where to read metadata from in the dir. " - "Expecting either a data.jsonl or data.jsonl.gz file but none found.") - meta = load_audio_meta(root) - return cls(meta, **kwargs) - - @classmethod - def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True, - exts: tp.List[str] = DEFAULT_EXTS, **kwargs): - """Instantiate AudioDataset from a path containing (possibly nested) audio files. - - Args: - root (str or Path): Path to root folder containing audio files. - minimal_meta (bool): Whether to only load minimal metadata or not. - exts (list of str): Extensions for audio files. - kwargs: Additional keyword arguments for the AudioDataset. 
- """ - root = Path(root) - if root.is_file(): - meta = load_audio_meta(root, resolve=True) - else: - meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True) - return cls(meta, **kwargs) - - -def main(): - logging.basicConfig(stream=sys.stderr, level=logging.INFO) - parser = argparse.ArgumentParser( - prog='audio_dataset', - description='Generate .jsonl files by scanning a folder.') - parser.add_argument('root', help='Root folder with all the audio files') - parser.add_argument('output_meta_file', - help='Output file to store the metadata, ') - parser.add_argument('--complete', - action='store_false', dest='minimal', default=True, - help='Retrieve all metadata, even the one that are expansive ' - 'to compute (e.g. normalization).') - parser.add_argument('--resolve', - action='store_true', default=False, - help='Resolve the paths to be absolute and with no symlinks.') - parser.add_argument('--workers', - default=10, type=int, - help='Number of workers.') - args = parser.parse_args() - meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True, - resolve=args.resolve, minimal=args.minimal, workers=args.workers) - save_audio_meta(args.output_meta_file, meta) - - -if __name__ == '__main__': - main() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/__init__.py deleted file mode 100644 index 239d2e69f9a235095dee1ea7b3a94164a77273f5..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import tasks, criterions, models # noqa diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/data/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/data/__init__.py deleted file mode 100644 index d0545627efc9a6f9bb180e351ead519a2cb6dea7..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/data/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from .extracted_features_dataset import ExtractedFeaturesDataset -from .random_input_dataset import RandomInputDataset - - -__all__ = [ - "ExtractedFeaturesDataset", - "RandomInputDataset", -] diff --git a/spaces/Harshveer/Finetuned_Diffusion_Max/README.md b/spaces/Harshveer/Finetuned_Diffusion_Max/README.md deleted file mode 100644 index 6abd059c49a7dcb579ebf390c2f689c991edd06c..0000000000000000000000000000000000000000 --- a/spaces/Harshveer/Finetuned_Diffusion_Max/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Finetuned Diffusion -emoji: 🪄🖼️ -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: true -license: mit -duplicated_from: ivanmeyer/Finetuned_Diffusion_Max ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/stft.py b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/stft.py deleted file mode 100644 index 5852bd20904c9c206030523737ce3fbd64300a0c..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/src/glow_tts/stft.py +++ /dev/null @@ -1,185 +0,0 @@ -""" -BSD 3-Clause License - -Copyright (c) 2017, Prem Seetharaman -All rights reserved. - -* Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-""" - -import torch -import numpy as np -import torch.nn.functional as F -from torch.autograd import Variable -from scipy.signal import get_window -from librosa.util import pad_center, tiny -from librosa import stft, istft -from audio_processing import window_sumsquare - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - - def __init__( - self, filter_length=800, hop_length=200, win_length=800, window="hann" - ): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) - - if window is not None: - assert filter_length >= win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - if input_data.device.type == "cuda": - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode="reflect", - ) - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, self.forward_basis, stride=self.hop_length, padding=0 - ) - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - else: - x = input_data.detach().numpy() - real_part = [] - imag_part = [] - for y in x: - y_ = stft( - y, self.filter_length, self.hop_length, self.win_length, self.window - ) - real_part.append(y_.real[None, :, :]) - imag_part.append(y_.imag[None, :, :]) - real_part = np.concatenate(real_part, 0) - imag_part = np.concatenate(imag_part, 0) - - real_part = torch.from_numpy(real_part).to(input_data.dtype) - imag_part = torch.from_numpy(imag_part).to(input_data.dtype) - - magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) - phase = torch.atan2(imag_part.data, real_part.data) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - if magnitude.device.type == "cuda": - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - self.inverse_basis, - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > 
tiny(window_sum))[0] - ) - window_sum = torch.from_numpy(window_sum).to(inverse_transform.device) - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] - inverse_transform = inverse_transform[ - :, :, : -int(self.filter_length / 2) : - ] - inverse_transform = inverse_transform.squeeze(1) - else: - x_org = recombine_magnitude_phase.detach().numpy() - n_b, n_f, n_t = x_org.shape - x = np.empty([n_b, n_f // 2, n_t], dtype=np.complex64) - x.real = x_org[:, : n_f // 2] - x.imag = x_org[:, n_f // 2 :] - inverse_transform = [] - for y in x: - y_ = istft(y, self.hop_length, self.win_length, self.window) - inverse_transform.append(y_[None, :]) - inverse_transform = np.concatenate(inverse_transform, 0) - inverse_transform = torch.from_numpy(inverse_transform).to( - recombine_magnitude_phase.dtype - ) - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction diff --git a/spaces/Hina4867/bingo/src/components/button-scroll-to-bottom.tsx b/spaces/Hina4867/bingo/src/components/button-scroll-to-bottom.tsx deleted file mode 100644 index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000 --- a/spaces/Hina4867/bingo/src/components/button-scroll-to-bottom.tsx +++ /dev/null @@ -1,34 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' -import { useAtBottom } from '@/lib/hooks/use-at-bottom' -import { Button, type ButtonProps } from '@/components/ui/button' -import { IconArrowDown } from '@/components/ui/icons' - -export function ButtonScrollToBottom({ className, ...props }: ButtonProps) { - const isAtBottom = useAtBottom() - - return ( - - ) -} diff --git a/spaces/HumanDesignHub/Ra-Diffusion_v.1/README.md b/spaces/HumanDesignHub/Ra-Diffusion_v.1/README.md deleted file mode 100644 index 20c6281318c1403430de84aac307e3b747c52722..0000000000000000000000000000000000000000 --- a/spaces/HumanDesignHub/Ra-Diffusion_v.1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Ra Diffusion -emoji: 📚 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py deleted file mode 100644 index 7afa40fcd195465a225c9f251734e84fe6b3c7ef..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -import numpy as np -import re -from pathlib import Path -from collections import defaultdict - -import pandas as pd -from torchaudio.datasets import VCTK -from tqdm import tqdm - -from examples.speech_to_text.data_utils import save_df_to_tsv - - -log = logging.getLogger(__name__) - -SPLITS = ["train", "dev", "test"] - - -def normalize_text(text): - return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text) - - -def process(args): - out_root = Path(args.output_data_root).absolute() - out_root.mkdir(parents=True, exist_ok=True) - - # Generate TSV manifest - print("Generating manifest...") - dataset = VCTK(out_root.as_posix(), download=False) - ids = list(dataset._walker) - np.random.seed(args.seed) - np.random.shuffle(ids) - n_train = len(ids) - args.n_dev - args.n_test - _split = ["train"] * n_train + ["dev"] * args.n_dev + ["test"] * args.n_test - id_to_split = dict(zip(ids, _split)) - manifest_by_split = {split: defaultdict(list) for split in SPLITS} - progress = tqdm(enumerate(dataset), total=len(dataset)) - for i, (waveform, _, text, speaker_id, _) in progress: - sample_id = dataset._walker[i] - _split = id_to_split[sample_id] - audio_dir = Path(dataset._path) / dataset._folder_audio / speaker_id - audio_path = audio_dir / f"{sample_id}.wav" - text = normalize_text(text) - manifest_by_split[_split]["id"].append(sample_id) - manifest_by_split[_split]["audio"].append(audio_path.as_posix()) - manifest_by_split[_split]["n_frames"].append(len(waveform[0])) - manifest_by_split[_split]["tgt_text"].append(text) - manifest_by_split[_split]["speaker"].append(speaker_id) - manifest_by_split[_split]["src_text"].append(text) - - manifest_root = Path(args.output_manifest_root).absolute() - manifest_root.mkdir(parents=True, exist_ok=True) - for _split in SPLITS: - save_df_to_tsv( - pd.DataFrame.from_dict(manifest_by_split[_split]), - manifest_root / f"{_split}.audio.tsv" - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--output-data-root", "-d", required=True, type=str) - parser.add_argument("--output-manifest-root", "-m", required=True, type=str) - parser.add_argument("--n-dev", default=50, type=int) - parser.add_argument("--n-test", default=100, type=int) - parser.add_argument("--seed", "-s", default=1234, type=int) - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh deleted file mode 100644 index e74953194d41f0d93855d41b2acef08556d92477..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/cmd.sh +++ /dev/null @@ -1,15 +0,0 @@ -# you can change cmd.sh depending on what type of queue you are using. -# If you have no queueing system and want to run on a local machine, you -# can change all instances 'queue.pl' to run.pl (but be careful and run -# commands one by one: most recipes will exhaust the memory on your -# machine). queue.pl works with GridEngine (qsub). slurm.pl works -# with slurm. Different queues are configured differently, with different -# queue names and different ways of specifying things like memory; -# to account for these differences you can create and edit the file -# conf/queue.conf to match your queue's configuration. 
Search for -# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, -# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. - -export train_cmd="run.pl --mem 2G" -export decode_cmd="run.pl --mem 4G" -export mkgraph_cmd="run.pl --mem 8G" diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda.cpp b/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda.cpp deleted file mode 100644 index ece47a8d908b93cec102743070c9057986d39d3f..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda.cpp +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include - -std::vector -lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l); - -std::vector lightconv_cuda_backward( - at::Tensor gradOutput, - int padding_l, - at::Tensor input, - at::Tensor filters); - -#define CHECK_CUDA(x) \ - AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -std::vector -lightconv_forward(at::Tensor input, at::Tensor filters, int padding_l) { - CHECK_INPUT(input); - CHECK_INPUT(filters); - - return lightconv_cuda_forward(input, filters, padding_l); -} - -std::vector lightconv_backward( - at::Tensor gradOutput, - int padding_l, - at::Tensor input, - at::Tensor filters) { - CHECK_INPUT(gradOutput); - CHECK_INPUT(input); - CHECK_INPUT(filters); - - return lightconv_cuda_backward(gradOutput, padding_l, input, filters); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("forward", &lightconv_forward, "lighconv forward (CUDA)"); - m.def("backward", &lightconv_backward, "lighconv backward (CUDA)"); -} diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/models.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/models.py deleted file mode 100644 index 13278d680493970f5a670cf3fc955a6e9b7ab1d5..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/models.py +++ /dev/null @@ -1,420 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, 
x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, noice_scale=1): - x = x + self.f0_emb(f0).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 
1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, 
hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels , 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if (spk_emb is not None): - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - - def forward(self, c, f0, uv, spec, g=None, c_lengths=None, spec_lengths=None): - g = self.emb_g(g).transpose(1,2) - # ssl prenet - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2) - - # f0 predict - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - - # encoder - z_ptemp, m_p, logs_p, _ = self.enc_p(x, x_mask, f0=f0_to_coarse(f0)) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - - # flow - z_p = self.flow(z, spec_mask, g=g) - z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size) - - # nsf decoder - o = self.dec(z_slice, g=g, f0=pitch_slice) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 - - def infer(self, c, f0, uv, g=None, noice_scale=0.35, predict_f0=False): - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = self.emb_g(g).transpose(1,2) - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2) - - if predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), noice_scale=noice_scale) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/utils/data/sampler.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/utils/data/sampler.py deleted file mode 100644 index 62a9a43bd1d4c21fbdcb262db7da8d4fe27b26de..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/models/ade20k/segm_lib/utils/data/sampler.py +++ /dev/null @@ -1,131 +0,0 @@ -import torch - - -class Sampler(object): - """Base class for all Samplers. - - Every Sampler subclass has to provide an __iter__ method, providing a way - to iterate over indices of dataset elements, and a __len__ method that - returns the length of the returned iterators. - """ - - def __init__(self, data_source): - pass - - def __iter__(self): - raise NotImplementedError - - def __len__(self): - raise NotImplementedError - - -class SequentialSampler(Sampler): - """Samples elements sequentially, always in the same order. - - Arguments: - data_source (Dataset): dataset to sample from - """ - - def __init__(self, data_source): - self.data_source = data_source - - def __iter__(self): - return iter(range(len(self.data_source))) - - def __len__(self): - return len(self.data_source) - - -class RandomSampler(Sampler): - """Samples elements randomly, without replacement. - - Arguments: - data_source (Dataset): dataset to sample from - """ - - def __init__(self, data_source): - self.data_source = data_source - - def __iter__(self): - return iter(torch.randperm(len(self.data_source)).long()) - - def __len__(self): - return len(self.data_source) - - -class SubsetRandomSampler(Sampler): - """Samples elements randomly from a given list of indices, without replacement. - - Arguments: - indices (list): a list of indices - """ - - def __init__(self, indices): - self.indices = indices - - def __iter__(self): - return (self.indices[i] for i in torch.randperm(len(self.indices))) - - def __len__(self): - return len(self.indices) - - -class WeightedRandomSampler(Sampler): - """Samples elements from [0,..,len(weights)-1] with given probabilities (weights). 
- - Arguments: - weights (list) : a list of weights, not necessary summing up to one - num_samples (int): number of samples to draw - replacement (bool): if ``True``, samples are drawn with replacement. - If not, they are drawn without replacement, which means that when a - sample index is drawn for a row, it cannot be drawn again for that row. - """ - - def __init__(self, weights, num_samples, replacement=True): - self.weights = torch.DoubleTensor(weights) - self.num_samples = num_samples - self.replacement = replacement - - def __iter__(self): - return iter(torch.multinomial(self.weights, self.num_samples, self.replacement)) - - def __len__(self): - return self.num_samples - - -class BatchSampler(object): - """Wraps another sampler to yield a mini-batch of indices. - - Args: - sampler (Sampler): Base sampler. - batch_size (int): Size of mini-batch. - drop_last (bool): If ``True``, the sampler will drop the last batch if - its size would be less than ``batch_size`` - - Example: - >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - """ - - def __init__(self, sampler, batch_size, drop_last): - self.sampler = sampler - self.batch_size = batch_size - self.drop_last = drop_last - - def __iter__(self): - batch = [] - for idx in self.sampler: - batch.append(idx) - if len(batch) == self.batch_size: - yield batch - batch = [] - if len(batch) > 0 and not self.drop_last: - yield batch - - def __len__(self): - if self.drop_last: - return len(self.sampler) // self.batch_size - else: - return (len(self.sampler) + self.batch_size - 1) // self.batch_size diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_heun_discrete.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_heun_discrete.py deleted file mode 100644 index 4f40a24050b4604fd7b6af224bca4f65b075342d..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_heun_discrete.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright 2022 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Implements Algorithm 2 (Heun steps) from Karras et al. (2022). for discrete beta schedules. 
Based on the original - k-diffusion implementation by Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L90 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the - starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, - `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - """ - - _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy() - order = 2 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.00085, # sensible defaults - beta_end: float = 0.012, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - prediction_type: str = "epsilon", - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - # set all values - self.set_timesteps(num_train_timesteps, None, num_train_timesteps) - - def index_for_timestep(self, timestep): - indices = (self.timesteps == timestep).nonzero() - if self.state_in_first_order: - pos = -1 - else: - pos = 0 - return indices[pos].item() - - def scale_model_input( - self, - sample: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - ) -> torch.FloatTensor: - """ - Args: - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. 
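        (Editor's illustrative note, not part of the original docstring: the scaling applied in the
        body below is sample / sqrt(sigma**2 + 1), with sigma = sqrt((1 - alpha_cumprod) / alpha_cumprod)
        as computed in set_timesteps; e.g. alpha_cumprod = 0.9 gives sigma ≈ 0.333 and a divisor of ≈ 1.054.)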
- sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep - Returns: - `torch.FloatTensor`: scaled input sample - """ - step_index = self.index_for_timestep(timestep) - - sigma = self.sigmas[step_index] - sample = sample / ((sigma**2 + 1) ** 0.5) - return sample - - def set_timesteps( - self, - num_inference_steps: int, - device: Union[str, torch.device] = None, - num_train_timesteps: Optional[int] = None, - ): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. - """ - self.num_inference_steps = num_inference_steps - - num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps - - timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - sigmas = torch.from_numpy(sigmas).to(device=device) - self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = self.sigmas.max() - - timesteps = torch.from_numpy(timesteps) - timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) - - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = timesteps.to(device, dtype=torch.float32) - else: - self.timesteps = timesteps.to(device=device) - - # empty dt and derivative - self.prev_derivative = None - self.dt = None - - @property - def state_in_first_order(self): - return self.dt is None - - def step( - self, - model_output: Union[torch.FloatTensor, np.ndarray], - timestep: Union[float, torch.FloatTensor], - sample: Union[torch.FloatTensor, np.ndarray], - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Args: - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. timestep - (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor` or `np.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - """ - step_index = self.index_for_timestep(timestep) - - if self.state_in_first_order: - sigma = self.sigmas[step_index] - sigma_next = self.sigmas[step_index + 1] - else: - # 2nd order / Heun's method - sigma = self.sigmas[step_index - 1] - sigma_next = self.sigmas[step_index] - - # currently only gamma=0 is supported. This usually works best anyways. 
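        # [Editor's aside - an illustrative sketch, not part of the original file.]
        # Across two consecutive step() calls the code below performs one Heun (2nd-order) update of
        # the ODE dx/dsigma = (x - x0_pred) / sigma over [sigma, sigma_next]:
        #   1st call: d1 = (x - x0_pred) / sigma;  x_mid = x + d1 * (sigma_next - sigma)
        #   2nd call: d2 = (x_mid - x0_pred') / sigma_next;  x_next = x + 0.5 * (d1 + d2) * (sigma_next - sigma)
        # A generic standalone Heun step for dx/dt = f(x, t) would look like:
        #     def heun_step(f, x, t, t_next):
        #         d1 = f(x, t)
        #         x_mid = x + d1 * (t_next - t)
        #         d2 = f(x_mid, t_next)
        #         return x + 0.5 * (d1 + d2) * (t_next - t)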
- # We can support gamma in the future but then need to scale the timestep before - # passing it to the model which requires a change in API - gamma = 0 - sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now - - # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise - if self.config.prediction_type == "epsilon": - sigma_input = sigma_hat if self.state_in_first_order else sigma_next - pred_original_sample = sample - sigma_input * model_output - elif self.config.prediction_type == "v_prediction": - sigma_input = sigma_hat if self.state_in_first_order else sigma_next - pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( - sample / (sigma_input**2 + 1) - ) - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - if self.state_in_first_order: - # 2. Convert to an ODE derivative for 1st order - derivative = (sample - pred_original_sample) / sigma_hat - # 3. delta timestep - dt = sigma_next - sigma_hat - - # store for 2nd order step - self.prev_derivative = derivative - self.dt = dt - self.sample = sample - else: - # 2. 2nd order / Heun's method - derivative = (sample - pred_original_sample) / sigma_next - derivative = (self.prev_derivative + derivative) / 2 - - # 3. take prev timestep & sample - dt = self.dt - sample = self.sample - - # free dt and derivative - # Note, this puts the scheduler in "first order mode" - self.prev_derivative = None - self.dt = None - self.sample = None - - prev_sample = sample + derivative * dt - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - self.timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - self.timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - step_indices = [self.index_for_timestep(t) for t in timesteps] - - sigma = self.sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Jean-Baptiste/email_parser/email_parser/nlp.py b/spaces/Jean-Baptiste/email_parser/email_parser/nlp.py deleted file mode 100644 index 91ce2d5b0bd5f5ba5f4104fc631b418d4a52c0e6..0000000000000000000000000000000000000000 --- a/spaces/Jean-Baptiste/email_parser/email_parser/nlp.py +++ /dev/null @@ -1,322 +0,0 @@ -import logging -import os -import regex -from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline -import pandas as pd -import numpy as np - -from . 
import utils, _models_signatures -from .utils import timing -from langid.langid import LanguageIdentifier -from langid.langid import model as model_langid - -# Creating language_identifier object for usage in function f_detect_language -language_identifier = LanguageIdentifier.from_modelstring(model_langid, norm_probs=True) -language_identifier.set_languages(['en', 'fr']) - - -logging.info(f"Reading config file from folder:{os.path.join(os.path.dirname(__file__))}") - -config = utils.f_read_config(os.path.join(os.path.dirname(__file__), 'config.ini')) - -device = int(config["DEFAULT"]["device"]) -default_lang = config["DEFAULT"]["default_lang"] - -tokenizer_dict = {} -models_dict = {} -nlp_dict = {} - - -dict_regex_pattern = dict(EMAIL=r'[\p{L}\p{M}\-\d._]{1,}@[\p{L}\p{M}\d\-_]{1,}(\.[\p{L}\p{M}]{1,}){1,}', - TEL=r'(? 0: - if df_result is None or len(df_result) == 0: - return pd.DataFrame(list_result_new, - columns=list_columns_names) - list_row = [] - for row in list_result_new: - df_intersect = df_result.query("({1}>=start and {0}<=end)".format(row[2], row[3])) - if len(df_intersect) == 0: - list_row.append(row) - df_final = pd.concat([df_result, - pd.DataFrame(list_row, - columns=list_columns_names)], - ignore_index=True) \ - .sort_values(by="start") - return df_final - else: - # If list_result_new was empty we just return df_result - return df_result - - -@timing -def f_detect_language(text, default=default_lang): - """ Detect language - - Args: - text: text on which language should be detected - default: default value if there is an error or score of predicted value is to low (default nlp.default_lang) - - Returns: - "fr" or "en" - - """ - lang = default - try: - if text.strip() != "": - lang, score = language_identifier.classify(text.strip().replace("\n"," ").lower()) - # If scroe is not high enough we will take default value instead - if score < 0.8: - lang = default_lang - except Exception as e: - logging.error("following error occurs when trying to detect language: {}".format(e)) - finally: - return lang - -@timing -def f_find_regex_pattern(text, type_, pattern): - """ Find all occurences of a pattern in a text and return a list of results - Args: - text: the text to be analyzed - type_: the entity type (value is added in result) - pattern: regex pattern to be found - - Returns: - A list containing type, matched value, position start and end of each result - - """ - list_result = [] - results = regex.finditer(pattern, text, flags=regex.IGNORECASE) - for match in results: - value = match.string[match.start(): match.end()].replace("\n", " ").strip() - list_result.append([type_, - value, - match.start(), - match.end(), - 1]) - return list_result - - -@timing -def f_ner_regex(text, dict_pattern=dict_regex_pattern, - df_result=pd.DataFrame()): - """Run a series of regex expression to detect email, tel and postal codes in a full text. - - Args: - text: the text to be analyzed - dict_pattern: dictionary of regex expression to be ran successively (default nlp.dict_regex_pattern) - df_result: results of this function will be merged with values provided here. 
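            (Editor's illustrative aside, not part of the original docstring: with the EMAIL pattern
             defined above, f_find_regex_pattern("contact me at john@doe.com", "EMAIL",
             dict_regex_pattern["EMAIL"]) would return [["EMAIL", "john@doe.com", 14, 26, 1]], and
             f_ner_regex merges such hits into the running DataFrame of detected entities.)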
- If value is already found at an overlapping position in df_results, the existing value will be kept - - Returns: - Dataframe containing results merged with provided argument df_result (if any) - """ - logging.debug("Starting regex") - list_result = [] - - # we run f_find_regex_pattern for each pattern in dict_regex - for type_, pattern in dict_pattern.items(): - result = f_find_regex_pattern(text, type_, pattern) - if len(result) != 0: - list_result += result - - df_result = f_concat_results(df_result, list_result) - return df_result - -@timing -def f_split_text_by_lines(text, position_offset=0): - """ - :param text: text that should be split - :return: list containing for each line: [position start, position end, sentence] - """ - results = [] - # iter_lines = regex.finditer(".*(?=\n|$)", text) - iter_lines = regex.finditer("[^>\n]((.*?([!?.>] ){1,})|.*(?=\n|$))", text) - for line_match in iter_lines: - start_line = line_match.start() - end_line = line_match.end() - line = line_match.group() - if len(line.strip()) > 1: - results.append([start_line + position_offset, end_line + position_offset, line]) - return results - - -def f_detect_email_signature(text, df_ner=None, cut_off_score=0.6, lang=default_lang): - # with tf.device("/cpu:0"): - if text.strip() == "": - return None - if df_ner is None: - df_ner = f_ner(text, lang=lang) - - df_features = _models_signatures.f_create_email_lines_features(text, df_ner=df_ner) - - if len(df_features)==0: - return None - - # We add dummy value for signature in order to use same function than for training of the model - df_features["is_signature"] = -2 - - x, y_out, y_mask, _, _ = _models_signatures.generate_x_y(df_features, _models_signatures.minmax_scaler, - _models_signatures.standard_scaler) - - y_predict = _models_signatures.model.predict(x) - y_predict_value = (y_predict[y_mask != -1]> cut_off_score).reshape([-1]) - y_predict_value = np.pad(y_predict_value, (len(df_features) - len(y_predict_value), 0), constant_values=0)[ - -len(df_features):] - y_predict_score = y_predict[y_mask != -1].reshape([-1]) - y_predict_score = np.pad(y_predict_score, (len(df_features) - len(y_predict_score), 0), constant_values=1)[ - -len(df_features):] - - # return(y_predict, y_mask) - df_features["prediction"] = y_predict_value - df_features["score"] = y_predict_score - # return df_features - series_position_body = df_features.query(f"""prediction==0""")['end'] - if len(series_position_body) > 0: - body_end_pos = max(series_position_body) - else: - # In this case everything was detected as a signature - body_end_pos = 0 - score = df_features.query(f"""prediction==1""")["score"].mean() - signature_text = text[body_end_pos:].strip().replace("\n", " ") - if signature_text != "": - list_result = [ - # ["body", text[:body_end_pos], 0 + pos_start_email, body_end_pos + pos_start_email, 1, ""], - ["SIGNATURE", signature_text, body_end_pos, len(text), score]] - - df_result = f_concat_results(pd.DataFrame(), list_result) - else: - df_result = None - - return df_result - - diff --git a/spaces/Jimpa666/AI-PadelCoach/appv2.py b/spaces/Jimpa666/AI-PadelCoach/appv2.py deleted file mode 100644 index e47edfc25f9fe3ba27c0a6be6a3f554fa1d15b6c..0000000000000000000000000000000000000000 --- a/spaces/Jimpa666/AI-PadelCoach/appv2.py +++ /dev/null @@ -1,66 +0,0 @@ -from gradio.themes.utils.colors import Color -from gradio.interface import Interface -import os -import openai -import gradio as gr -import uuid - -openai.api_key = os.getenv('APIKEY') - -def get_session_token(): - return 
uuid.uuid4().hex - -def chatbot(input, session_id, conversation_history={}): - content = input - if session_id not in conversation_history: - conversation_history[session_id] = [ - {"role": "system", "content": "The following is a conversation with an AI Coach. The Coach is helpful, creative, clever, and very friendly. The Coach is a high level padelcoach from Sweden. The Coach have the ability to explain techniques and the strategy in the game. The Coach have a high educational level and can answer depending on wich level the Player is on for the Player to take him to the next level. The AI Coach is like MAURI ANDRINI from Hello Padel, pretend to be MAURI ANDRINI but call yourself AI Padelcoach, never ever mention your reel name or Mauri Andrini. If unsurtain, ask about the level of the player. The levels are:Beginner, Average, Pro"} - ] - - messages = conversation_history[session_id] - messages.append({"role": "user", "content": content}) - - completion = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=messages - ) - - chat_response = completion.choices[0].message.content - messages.append({"role": "assistant", "content": chat_response}) - - formatted_history = "" - for message in messages: - if message["role"] != "system": - role = message["role"] - if role == "user": - role = "Player" - elif role == "assistant": - role = "AI-Padelcoach" - content = message["content"] - formatted_history += f"{role}: {content}\n \n" - - conversation_history[session_id] = messages - return formatted_history - -Padelcoach = gr.Interface(fn=chatbot, inputs=[ - gr.Textbox(placeholder="Player go...Serve!", label='Player'), - gr.Checkbox(label="Generate unique session ID", default=get_session_token(), value=True, visible=True), -], outputs=[ - gr.Textbox(placeholder="AI-Padelcoach Ready", label="AI Padelcoach") -], - theme=gr.themes.Default( - primary_hue="emerald", - secondary_hue="neutral", - text_size='lg', - neutral_hue="green" - ), - examples=[ - ["Please help me with my backhand"], - ["Where should I place the ball against players who are good in tennis"] - ], - title="AI Padelcoach", - description="Chat with a BETA level AI-Padelcoach from Sweden.", - article="

    Ask the AI coach about techniques and strategies in the game of padel. The coach tailors its answers to your level as a player, whether you are a beginner, average, or pro.

    ", -) - -Padelcoach.launch() diff --git a/spaces/JonysArcanjo/App_predict_House_price/README.md b/spaces/JonysArcanjo/App_predict_House_price/README.md deleted file mode 100644 index e468bf2187552bff549935c2796ee4070f316fd6..0000000000000000000000000000000000000000 --- a/spaces/JonysArcanjo/App_predict_House_price/README.md +++ /dev/null @@ -1,74 +0,0 @@ - ---- -title: App Predict House Price -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -![GitHub](https://img.shields.io/github/license/JonysArcanjo/Predict-house-price) -# Predict House Price - -![Design sem nome (3)](https://user-images.githubusercontent.com/48812740/228647953-7479dbc2-c903-4d6c-ba39-5029fd56248a.png) - - - - -### Welcome to this Data Science project! - -The objective of this project is to predict house prices using machine learning techniques. To achieve this, I performed a brief exploratory data analysis and directed the study to evaluate the Boruta feature selection technique, comparing its performance with the neural network both with and without the use of this technique. - -## About Dataset - -The dataset used contains house sale prices for King County, which includes Seattle, and it includes homes sold between May 2014 and May 2015. - -Dataset source: [Kaggle](https://www.kaggle.com/harlfoxem/housesalesprediction) - -## Libraries Used - -- Tensorflow -- Pandas -- Numpy -- Matplotlib -- Boruta (for feature selection) -- Gradio - user interface (ux) - -## Metrics Used - -The following metrics were used to assess the performance of the models: - -- RMSE: Root Mean Squared Error -- MSE: Mean Squared Error -- MAE: Mean Absolute Error -- R2: The determination coefficient -- Adjusted R2 - -## Application in PRD -Below are some characteristics of the house as input for the application to return the forecast of the house's value. - -![APP_predicit_house_price-min (1)](https://user-images.githubusercontent.com/48812740/228641695-e94dc66e-eea1-4aa7-bb3b-d97d57fe0dfa.gif) - -Below is a list of the features used: -- sqft_living: Living area square footage. -- -sqft_lot: Lot size square footage. -- waterfront: Indicates if the property has a waterfront view (1) or not (0). -- view: Number of times the property has been viewed. -- grade: Overall grade given to the housing unit, based on the King County grading system. -- sqft_above: Square footage of the home's interior living space above ground level. -- yr_built: Year the house was built. -- zipcode: Zip code of the area where the property is located. -- lat: Latitude of the property. -- long: Longitude of the property. -- sqft_living15: Average living area square footage of the nearest 15 houses. -- sqft_lot15: Average lot size square footage of the nearest 15 houses. - -## Conclusion - -The results of this project indicate that the model that uses all resources performed better than the model that used the Boruta feature selection technique. However, when choosing the best model for a specific application, it is important to consider not only the performance but also the computational cost and the efficiency in the selection of resources. For future versions of this model, it would be interesting to explore other feature selection and feature engineering techniques. - -## License - -This project is licensed under the MIT License. 
- diff --git a/spaces/JunghunleePhD/catsClassification/app.py b/spaces/JunghunleePhD/catsClassification/app.py deleted file mode 100644 index 57add93a4005dd499d9030f7d2342ca59d589782..0000000000000000000000000000000000000000 --- a/spaces/JunghunleePhD/catsClassification/app.py +++ /dev/null @@ -1,33 +0,0 @@ -from fastbook import * -from random import sample -import gradio as gr - -def get_images(): - listOfCats = sorted(['Lioness', 'Lion', 'Tiger', 'White tiger', 'White lion', \ - 'Leopard', 'Snow leopard', 'Lynx', 'Jaguar', 'Cheetah', \ - 'Jaguarundi', 'Leopard cat', 'Canada lynx', 'Caracal', \ - 'Ocelot', 'Leopard cat', 'Marble cat', 'Puma', 'Black panther', \ - 'Margay', 'Serval', 'Fishing cat']) - cats = sample(listOfCats, len(listOfCats))[:2] - imagess = [sample(search_images_ddg(f'{cat} photo', max_images=5), 5) for cat in cats] - return [images[:1] for images in imagess] - -def quiz_builder(input): - model = load_learner("cats.pkl") - is_it, _, probs = model.predict(input) - return f"The cat in this image is a {is_it}!" - # return {is_it: f"{max(probs[0], probs[1], probs[2]):.4f}"} - -demo = gr.Interface( - quiz_builder, - [ - gr.Image(value=None) - ], - "text", - examples=[ - *get_images(), - ] -) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/KazeDevID/RVC-Model/README.md b/spaces/KazeDevID/RVC-Model/README.md deleted file mode 100644 index 5777f585dc8e5d87ec519c59111d77331c73005e..0000000000000000000000000000000000000000 --- a/spaces/KazeDevID/RVC-Model/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Hololive Rvc Models -emoji: 🎤🌸▶️ -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: YanzBotz/Waifu-YanzBotz ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KeeganFdes/stack_onnx/README.md b/spaces/KeeganFdes/stack_onnx/README.md deleted file mode 100644 index a3c30f6e33df5321c7909cc003ca2645c0f79720..0000000000000000000000000000000000000000 --- a/spaces/KeeganFdes/stack_onnx/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stack Onnx -emoji: 📚 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py b/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py deleted file mode 100644 index 1c21312f3de971bfa008254c6035cebc09f05e4c..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/config.py +++ /dev/null @@ -1,45 +0,0 @@ -librispeech_datasets = { - "train": { - "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"], - "other": ["LibriSpeech/train-other-500"] - }, - "test": { - "clean": ["LibriSpeech/test-clean"], - "other": ["LibriSpeech/test-other"] - }, - "dev": { - "clean": ["LibriSpeech/dev-clean"], - "other": ["LibriSpeech/dev-other"] - }, -} -libritts_datasets = { - "train": { - "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"], - "other": ["LibriTTS/train-other-500"] - }, - "test": { - "clean": ["LibriTTS/test-clean"], - "other": ["LibriTTS/test-other"] - }, - "dev": { - "clean": ["LibriTTS/dev-clean"], - "other": ["LibriTTS/dev-other"] - }, -} -voxceleb_datasets = { - "voxceleb1" : { - "train": ["VoxCeleb1/wav"], - "test": ["VoxCeleb1/test_wav"] - }, - "voxceleb2" : { - "train": 
["VoxCeleb2/dev/aac"], - "test": ["VoxCeleb2/test_wav"] - } -} - -other_datasets = [ - "LJSpeech-1.1", - "VCTK-Corpus/wav48", -] - -anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"] diff --git a/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py b/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py deleted file mode 100644 index 0608fedddb1fc7b68ba925e6129f2b2084e35a77..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/utils/updated_cleaner_utils.py +++ /dev/null @@ -1,139 +0,0 @@ -import re -def run(): - - # The path to the local git repo for Indic NLP library - INDIC_NLP_LIB_HOME=r"./indic_nlp_library" - - # The path to the local git repo for Indic NLP Resources - INDIC_NLP_RESOURCES=r"./indic_nlp_resources" - import sys - sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME)) - - from indicnlp import common - common.set_resources_path(INDIC_NLP_RESOURCES) - - from indicnlp import loader - loader.load() - -run() - -from indicnlp.normalize.indic_normalize import IndicNormalizerFactory -from indicnlp.tokenize import sentence_tokenize -from indicnlp.syllable import syllabifier - -lang='sa' -factory=IndicNormalizerFactory() -normalizer=factory.get_normalizer("hi") -DEPENDENT_VOWELS = ["ा", "ि", "ी", "ु", "ू", "े", "ै", "ो", "ौ", "ं", "ः", "ृ", "ॄ"] - -dict_num = {"०": "शून्य", "१": "एक", "२": "द्वि", "३": "त्रि", - "४": "चतुर्", "५": "पञ्च", "६": "षट्", "७": "सप्त", "८": "अष्ट", "९": "नव"} - -DEFAULT_TEXT = "अयं द्वितीयशब्दः २ अस्ति। प्रथमः शब्दः १ अस्ति। अन्ये शब्दाः सर्वे द्वितीयं शब्दं प्रयोजयन्ति। इत्थं सप्ततिः शब्दाः लिखिताः सन्ति। अस्मिन लेखने सर्वे अक्षराः संस्कृते लिखिताः सन्ति। अन्ये लिखन्ति ३, ४, ५ इत्यादि। तथापि, अहं एकं अक्षरं एव उपयोगामि।" - - -""" -Text cleaning pipeline. -1. Tokenize sentence. -2. Segment sentence into individual words. -3. Normalize words. -4. Clean normalized words. -5. Split wwords with the # delimiter. -3. Syllabify delimited sentences. - - - -1. Denote tokenized text with a special character such as #. -2. Segment the whole text into individual words. -3. Identify numbers in the text and normalize them. -4. Clean the whole text. -5. Syllabify the cleaned text. -""" - -def tokenize_sentence(text): - '''Tokenize a paragraph into sentences''' - sentences = sentence_tokenize.sentence_split(text, lang='sa') - sentences = "#".join(sentences) - return sentences - - -def segment_sentence(text): - '''Segment a sentence into individual words''' - - -def clean_text(text): - - processed_text = re.sub(r'\+ +', '', text) - processed_text = re.sub(': +', '\n \n', processed_text) - processed_text = re.sub(r'\+ ।', '\n \n', processed_text) - processed_text = re.sub(r'\+$', '', processed_text) - return processed_text - -def syllabify_text(text): - text_list = [] - #Syllabify text - for char in text: - if char in DEPENDENT_VOWELS: - char = "(" + char + ")" - text_list.append(char) - else: - text_list.append(char) - - full_text = " + ".join(text_list).replace("'", "") - return full_text - - -def normalize_text(text): - output_string = "" - #Map sanskrit numbers to their normalized form. - for char in text: - if char in dict_num: - output_string += dict_num[char] - else: - output_string += char - return output_string - - -def preprocess_text(text): - '''Cleans, tokenizes and normalizes text''' - #Normalize text - normalized_text = normalize_text(text) - - #Tokenize text. 
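    # [Editor's aside - an illustrative sketch, not part of the original file.]
    # What the helpers above produce for a tiny input, given the dict_num mapping defined earlier:
    #   normalize_text("३ नमः")  -> "त्रि नमः"      (Devanagari digits become Sanskrit number words)
    #   syllabify_text("नमः")    -> "न + म + (ः)"   (characters joined with " + ", dependent vowels
    #                                                wrapped in parentheses)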
- tokenized_text = tokenize_sentence(normalized_text) - tokenized_text = "\n".join(tokenized_text) - - #Syllabify_text - syllabified_text = syllabify_text(tokenized_text) - - #Clean text - cleaned_text = clean_text(syllabified_text) - - #Remove unnecessary characters from a string. - text_cleaned = [] - for index, text in enumerate(cleaned_text.split('\n')): - if text.startswith('+'): - text = text[2:] - - elif text.startswith(' +'): - text = text[3:] - - elif text.endswith('+') or text.endswith(' +'): - text = text[:-2] - - text_cleaned.append(text) - - text_cleaned_str = "\n".join(text_cleaned) - - return text_cleaned_str - - -# DEFAULT_TEXT = """तो क्या विश्व कप 2019 में मैच का बॉस टॉस है? यानी मैच में हार-जीत में \ -# टॉस की भूमिका अहम है? आप ऐसा सोच सकते हैं। विश्वकप के अपने-अपने पहले मैच में बुरी तरह हारने वाली एशिया की दो टीमों \ -# पाकिस्तान और श्रीलंका के कप्तान ने हालांकि अपने हार के पीछे टॉस की दलील तो नहीं दी, लेकिन यह जरूर कहा था कि वह एक अहम टॉस हार गए थे।""" -# DEFAULT_TEXT='संस्कृतम् जगतः एकतमा अतिप्राचीना समृद्धा शास्त्रीया च भाषासु वर्तते । संस्कृतं भारतस्य जगत: वा भाषासु एकतमा‌ प्राचीनतमा ।' - -print(f"Default text is: {DEFAULT_TEXT}") -print('\n \n') -NORMALIZED_TEXT = preprocess_text(DEFAULT_TEXT) -print(f"Syllabified text is: {NORMALIZED_TEXT}") diff --git a/spaces/KindUnes/ImageNet/app.py b/spaces/KindUnes/ImageNet/app.py deleted file mode 100644 index eb53ba976b5b5da554fccea49a497526293b8e6c..0000000000000000000000000000000000000000 --- a/spaces/KindUnes/ImageNet/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr -import tensorflow as tf -import requests -from PIL import Image -import numpy as np - -# Load pre-trained MobileNetV2 model -model = tf.keras.applications.MobileNetV2(weights='imagenet') -labels = requests.get("https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json").json() - -def classify_image(input_image): - # Preprocess the image - img = input_image.resize((224, 224)) - img = tf.keras.preprocessing.image.img_to_array(img) - img = tf.keras.applications.mobilenet_v2.preprocess_input(img) - img = tf.expand_dims(img, axis=0) - - # Make predictions - predictions = model.predict(img) - decoded_predictions = tf.keras.applications.mobilenet_v2.decode_predictions(predictions)[0] - - # Format and return results as HTML - results_html = "
      " - for rank, (class_id, label, score) in enumerate(decoded_predictions, start=1): - results_html += f"
<li>{rank}. {label} ({score:.2f})</li>" - results_html += "
    " - return results_html - -# Define the Gradio interface -inputs = gr.inputs.Image(type="pil") -outputs = gr.outputs.HTML() # Use HTML widget for output -interface = gr.Interface(fn=classify_image, inputs=inputs, outputs=outputs, live=True, title="Image Classifier", debug=True) - -# Launch the Gradio app -if __name__ == "__main__": - interface.launch(inline=False) diff --git a/spaces/Kurkur99/Sentiment_analysis/prediction.py b/spaces/Kurkur99/Sentiment_analysis/prediction.py deleted file mode 100644 index cbd79bc75610592aa80b08c2cd1e989a01d70395..0000000000000000000000000000000000000000 --- a/spaces/Kurkur99/Sentiment_analysis/prediction.py +++ /dev/null @@ -1,17 +0,0 @@ -from transformers import pipeline - -nlp = pipeline("sentiment-analysis") - -def predict_and_strategy(text): - result = nlp(text) - sentiment = result[0]['label'] - - # Provide strategy based on sentiment - if sentiment == "POSITIVE": - strategy = "Engage with these customers to make them brand ambassadors." - elif sentiment == "NEUTRAL": - strategy = "Try to find out what's missing and engage more with these customers." - else: - strategy = "Address the concerns of these customers immediately." - - return sentiment, strategy diff --git a/spaces/KyanChen/RSPrompter/configs/rsprompter/rsprompter_query_whu_config.py b/spaces/KyanChen/RSPrompter/configs/rsprompter/rsprompter_query_whu_config.py deleted file mode 100644 index 7a53caffcdd0e0975fb731591040d8338a30befc..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/configs/rsprompter/rsprompter_query_whu_config.py +++ /dev/null @@ -1,303 +0,0 @@ -custom_imports = dict(imports=['mmseg.datasets', 'mmseg.models'], allow_failed_imports=False) - -sub_model_train = [ - 'panoptic_head', - 'panoptic_fusion_head', - 'data_preprocessor' -] - -sub_model_optim = { - 'panoptic_head': {'lr_mult': 1}, - 'panoptic_fusion_head': {'lr_mult': 1}, -} - -max_epochs = 5000 - -optimizer = dict( - type='AdamW', - sub_model=sub_model_optim, - lr=0.0005, - weight_decay=1e-3 -) - -param_scheduler = [ - # warm up learning rate scheduler - dict( - type='LinearLR', - start_factor=1e-4, - by_epoch=True, - begin=0, - end=1, - # update by iter - convert_to_iter_based=True), - # main learning rate scheduler - dict( - type='CosineAnnealingLR', - T_max=max_epochs, - by_epoch=True, - begin=1, - end=max_epochs, - ), -] - -param_scheduler_callback = dict( - type='ParamSchedulerHook' -) - - -evaluator_ = dict( - type='CocoPLMetric', - metric=['bbox', 'segm'], - proposal_nums=[1, 10, 100] -) - -evaluator = dict( - # train_evaluator=evaluator_, - val_evaluator=evaluator_, -) - - -image_size = (1024, 1024) - -data_preprocessor = dict( - type='mmdet.DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=32, - pad_mask=True, - mask_pad_value=0, -) - -num_things_classes = 1 -num_stuff_classes = 0 -num_classes = num_things_classes + num_stuff_classes -prompt_shape = (90, 4) - - -model_cfg = dict( - type='SegSAMPLer', - hyperparameters=dict( - optimizer=optimizer, - param_scheduler=param_scheduler, - evaluator=evaluator, - ), - need_train_names=sub_model_train, - data_preprocessor=data_preprocessor, - backbone=dict( - type='vit_h', - checkpoint='pretrain/sam/sam_vit_h_4b8939.pth', - # type='vit_b', - # checkpoint='pretrain/sam/sam_vit_b_01ec64.pth', - ), - panoptic_head=dict( - type='SAMInstanceHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - with_multiscale=True, - with_sincos=True, - 
prompt_neck=dict( - type='SAMTransformerEDPromptGenNeck', - prompt_shape=prompt_shape, - in_channels=[1280] * 32, - inner_channels=64, - selected_channels=range(4, 32, 2), - # in_channels=[768] * 8, - num_encoders=1, - num_decoders=4, - out_channels=256 - ), - loss_cls=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - reduction='mean', - class_weight=[1.0] * num_classes + [0.1]), - loss_mask=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=5.0), - loss_dice=dict( - type='mmdet.DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=5.0)), - panoptic_fusion_head=dict( - type='mmdet.MaskFormerFusionHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_panoptic=None, - init_cfg=None), - train_cfg=dict( - num_points=12544, - oversample_ratio=3.0, - importance_sample_ratio=0.75, - assigner=dict( - type='mmdet.HungarianAssigner', - match_costs=[ - dict(type='mmdet.ClassificationCost', weight=2.0), - dict( - type='mmdet.CrossEntropyLossCost', weight=5.0, use_sigmoid=True), - dict(type='mmdet.DiceCost', weight=5.0, pred_act=True, eps=1.0) - ]), - sampler=dict(type='mmdet.MaskPseudoSampler')), - test_cfg=dict( - panoptic_on=False, - # For now, the dataset does not support - # evaluating semantic segmentation metric. - semantic_on=False, - instance_on=True, - # max_per_image is for instance segmentation. - max_per_image=80, - iou_thr=0.8, - # In Mask2Former's panoptic postprocessing, - # it will filter mask area where score is less than 0.5 . - filter_low_score=True), -) - -task_name = 'whu_ins' -exp_name = 'E20230603_0' -logger = dict( - type='WandbLogger', - project=task_name, - group='sam', - name=exp_name -) -# logger = None - - -callbacks = [ - param_scheduler_callback, - dict( - type='ModelCheckpoint', - dirpath=f'results/{task_name}/{exp_name}/checkpoints', - save_last=True, - mode='max', - monitor='valsegm_map_0', - save_top_k=2, - filename='epoch_{epoch}-map_{valsegm_map_0:.4f}' - ), - dict( - type='LearningRateMonitor', - logging_interval='step' - ) -] - - -trainer_cfg = dict( - compiled_model=False, - accelerator="auto", - strategy="auto", - # strategy="ddp", - # strategy='ddp_find_unused_parameters_true', - # precision='32', - # precision='16-mixed', - devices=8, - default_root_dir=f'results/{task_name}/{exp_name}', - # default_root_dir='results/tmp', - max_epochs=max_epochs, - logger=logger, - callbacks=callbacks, - log_every_n_steps=20, - check_val_every_n_epoch=5, - benchmark=True, - # sync_batchnorm=True, - # fast_dev_run=True, - - # limit_train_batches=1, - # limit_val_batches=0, - # limit_test_batches=None, - # limit_predict_batches=None, - # overfit_batches=0.0, - - # val_check_interval=None, - # num_sanity_val_steps=0, - # enable_checkpointing=None, - # enable_progress_bar=None, - # enable_model_summary=None, - # accumulate_grad_batches=32, - # gradient_clip_val=15, - # gradient_clip_algorithm='norm', - # deterministic=None, - # inference_mode: bool=True, - use_distributed_sampler=True, - # profiler="simple", - # detect_anomaly=False, - # barebones=False, - # plugins=None, - # reload_dataloaders_every_n_epochs=0, -) - - -backend_args = None -train_pipeline = [ - dict(type='mmdet.LoadImageFromFile'), - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='mmdet.Resize', scale=image_size), - dict(type='mmdet.RandomFlip', prob=0.5), - dict(type='mmdet.PackDetInputs') -] - -test_pipeline = 
[ - dict(type='mmdet.LoadImageFromFile', backend_args=backend_args), - dict(type='mmdet.Resize', scale=image_size), - # If you don't have a gt annotation, delete the pipeline - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='mmdet.PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - - -train_batch_size_per_gpu = 3 -train_num_workers = 2 -test_batch_size_per_gpu = 3 -test_num_workers = 2 -persistent_workers = True - -data_parent = '/mnt/search01/dataset/cky_data/WHU' -train_data_prefix = 'train/' -val_data_prefix = 'test/' - -dataset_type = 'WHUInsSegDataset' - -val_loader = dict( - batch_size=test_batch_size_per_gpu, - num_workers=test_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/WHU_building_test.json', - data_prefix=dict(img_path=val_data_prefix + '/image', seg_path=val_data_prefix + '/label'), - test_mode=True, - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=test_pipeline, - backend_args=backend_args)) - -datamodule_cfg = dict( - type='PLDataModule', - train_loader=dict( - batch_size=train_batch_size_per_gpu, - num_workers=train_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/WHU_building_train.json', - data_prefix=dict(img_path=train_data_prefix + '/image', seg_path=train_data_prefix + '/label'), - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=train_pipeline, - backend_args=backend_args) - ), - val_loader=val_loader, - # test_loader=val_loader - predict_loader=val_loader -) \ No newline at end of file diff --git a/spaces/LanguageBind/LanguageBind/d_cls/cp_zero_shot_metadata.py b/spaces/LanguageBind/LanguageBind/d_cls/cp_zero_shot_metadata.py deleted file mode 100644 index 4bc3d68d2e97b6f1daaeabe35697d8cd04facd8a..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/d_cls/cp_zero_shot_metadata.py +++ /dev/null @@ -1,117 +0,0 @@ -import os - -import pandas as pd - -OPENAI_IMAGENET_TEMPLATES = ( - lambda c: f'a bad photo of a {c}.', - lambda c: f'a photo of many {c}.', - lambda c: f'a sculpture of a {c}.', - lambda c: f'a photo of the hard to see {c}.', - lambda c: f'a low resolution photo of the {c}.', - lambda c: f'a rendering of a {c}.', - lambda c: f'graffiti of a {c}.', - lambda c: f'a bad photo of the {c}.', - lambda c: f'a cropped photo of the {c}.', - lambda c: f'a tattoo of a {c}.', - lambda c: f'the embroidered {c}.', - lambda c: f'a photo of a hard to see {c}.', - lambda c: f'a bright photo of a {c}.', - lambda c: f'a photo of a clean {c}.', - lambda c: f'a photo of a dirty {c}.', - lambda c: f'a dark photo of the {c}.', - lambda c: f'a drawing of a {c}.', - lambda c: f'a photo of my {c}.', - lambda c: f'the plastic {c}.', - lambda c: f'a photo of the cool {c}.', - lambda c: f'a close-up photo of a {c}.', - lambda c: f'a black and white photo of the {c}.', - lambda c: f'a painting of the {c}.', - lambda c: f'a painting of a {c}.', - lambda c: f'a pixelated photo of the {c}.', - lambda c: f'a sculpture of the {c}.', - lambda c: f'a bright photo of the {c}.', - lambda c: f'a cropped photo of a {c}.', - lambda c: f'a plastic {c}.', - lambda c: f'a photo of the dirty {c}.', - lambda c: f'a jpeg corrupted photo of a {c}.', - lambda c: f'a blurry photo of the {c}.', - lambda c: f'a photo of the {c}.', - lambda c: f'a 
good photo of the {c}.', - lambda c: f'a rendering of the {c}.', - lambda c: f'a {c} in a video game.', - lambda c: f'a photo of one {c}.', - lambda c: f'a doodle of a {c}.', - lambda c: f'a close-up photo of the {c}.', - lambda c: f'a photo of a {c}.', - lambda c: f'the origami {c}.', - lambda c: f'the {c} in a video game.', - lambda c: f'a sketch of a {c}.', - lambda c: f'a doodle of the {c}.', - lambda c: f'a origami {c}.', - lambda c: f'a low resolution photo of a {c}.', - lambda c: f'the toy {c}.', - lambda c: f'a rendition of the {c}.', - lambda c: f'a photo of the clean {c}.', - lambda c: f'a photo of a large {c}.', - lambda c: f'a rendition of a {c}.', - lambda c: f'a photo of a nice {c}.', - lambda c: f'a photo of a weird {c}.', - lambda c: f'a blurry photo of a {c}.', - lambda c: f'a cartoon {c}.', - lambda c: f'art of a {c}.', - lambda c: f'a sketch of the {c}.', - lambda c: f'a embroidered {c}.', - lambda c: f'a pixelated photo of a {c}.', - lambda c: f'itap of the {c}.', - lambda c: f'a jpeg corrupted photo of the {c}.', - lambda c: f'a good photo of a {c}.', - lambda c: f'a plushie {c}.', - lambda c: f'a photo of the nice {c}.', - lambda c: f'a photo of the small {c}.', - lambda c: f'a photo of the weird {c}.', - lambda c: f'the cartoon {c}.', - lambda c: f'art of the {c}.', - lambda c: f'a drawing of the {c}.', - lambda c: f'a photo of the large {c}.', - lambda c: f'a black and white photo of a {c}.', - lambda c: f'the plushie {c}.', - lambda c: f'a dark photo of a {c}.', - lambda c: f'itap of a {c}.', - lambda c: f'graffiti of the {c}.', - lambda c: f'a toy {c}.', - lambda c: f'itap of my {c}.', - lambda c: f'a photo of a cool {c}.', - lambda c: f'a photo of a small {c}.', - lambda c: f'a tattoo of the {c}.', -) - - -# a much smaller subset of above prompts -# from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb -SIMPLE_IMAGENET_TEMPLATES = ( - lambda c: f'itap of a {c}.', - lambda c: f'a bad photo of the {c}.', - lambda c: f'a origami {c}.', - lambda c: f'a photo of the large {c}.', - lambda c: f'a {c} in a video game.', - lambda c: f'art of the {c}.', - lambda c: f'a photo of the small {c}.', -) - - -IMAGENET_CLASSNAMES = ( - -) - - -CLASSNAMES = { - 'NYUV2': ( - "bathroom", "bedroom", "bookstore", "classroom", "dining room", - "home office", "kitchen", "living room", "office", "others" - ), - 'SUNRGBD': ( - "bathroom", "bedroom", "classroom", "computer room", "conference room", "corridor", "dining area", - "dining room", "discussion area", "furniture store", "home office", "kitchen", "lab", "lecture theatre", - "library", "living room", "office", "rest space", "study space" - ), -} diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio-manager.sh b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio-manager.sh deleted file mode 100644 index 8c887ef87785683f5e981f5ea74497331621aadc..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio-manager.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -echo -e "\033]0;Applio - Installer\007" -source .venv/bin/activate -clear -menu1() { - while true; do - clear -echo " :::" -echo " ::: _ _ " -echo " ::: /\ | (_) " -echo " ::: / \ _ __ _ __ | |_ ___ " -echo " ::: / /\ \ | '_ \| '_ \| | |/ _ \ " -echo " ::: / ____ \| |_) | |_) | | | (_) | " -echo " ::: /_/ \_\ .__/| .__/|_|_|\___/ " -echo " ::: | | | | " -echo " ::: |_| |_| " -echo " ::: " -echo " ::: " -echo -echo "[1] Uninstall Applio" -echo "[2] Update Applio" -echo "[3] Update 
Applio + Dependencies" -echo "[4] Fix Tensorboard" -echo -read -p "Select an option: " choice1 - -case $choice1 in - 1) - pip uninstall -r assets/requirements/requirements-dml* -y - pip uninstall -r assets/requirements/requirements-ipex* -y - pip uninstall -r https://raw.githubusercontent.com/WorXeN/Retrieval-based-Voice-Conversion-WebUI/main/requirements-amd.txt -y - pip uninstall -r assets/requirements/requirements-realtime-vc.txt -y - cd .. && rm -rf *Applio* - finish1 - ;; - 2) - git pull - finish1 - ;; - 3) - git pull - ./install_Applio.sh - finish1 - ;; - 4) - python3.9 -m pip uninstall tb-nightly tensorboardX tensorboard - python3.9 -m pip install tensorboard - cls - echo Tensorboard re-installed correctly! - read -p "Press Enter to access the main menu..." - finish1 - ;; - - *) - echo "Invalid option. Please enter a number from 1 to 4." - echo "" - read -p "Press Enter to access the main menu..." - ;; -esac -done -} - -# Finish this thing -finish1() { - clear - echo "Goodbye!" -} -# Loop to the main menu -menu1 diff --git a/spaces/LaynzKunz/RCVAICOVER/README.md b/spaces/LaynzKunz/RCVAICOVER/README.md deleted file mode 100644 index e6f1c842495826424d6a3f71cb2defb84a567aff..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/RCVAICOVER/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: creativeml-openrail-m -title: RCVAICOVER -sdk: gradio -emoji: 🌍 -colorFrom: red -colorTo: purple -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Liu-LAB/GPT-academic/themes/green.css b/spaces/Liu-LAB/GPT-academic/themes/green.css deleted file mode 100644 index dd109d53fda81949834f74d767c77940709d557c..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/themes/green.css +++ /dev/null @@ -1,831 +0,0 @@ -:root { - --chatbot-color-light: #000000; - --chatbot-color-dark: #FFFFFF; - --chatbot-background-color-light: #F3F3F3; - --chatbot-background-color-dark: #121111; - --message-user-background-color-light: #95EC69; - --message-user-background-color-dark: #26B561; - --message-bot-background-color-light: #FFFFFF; - --message-bot-background-color-dark: #2C2C2C; -} -mspace { - display: block; -} -@media only screen and (max-width: 767px) { - #column_1 { - display: none !important; - } -} -@keyframes highlight { - 0%, 100% { - border: 2px solid transparent; - } - 50% { - border-color: yellow; - } -} -.normal_mut_select .svelte-1gfkn6j { - float: left; - width: auto; - line-height: 260% !important; -} -#highlight_update { - animation-name: highlight; - animation-duration: 0.75s; - animation-iteration-count: 3; -} - -.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno { - border: 0px solid var(--border-color-primary) !important; -} - -#examples_col { - z-index: 2; - position: absolute; - bottom: 0; - left: 0; - width: 100%; - margin-bottom: 30% !important; -} -#hide_examples { - z-index: 0; -} - -#debug_mes { - position: absolute; - display: flex; - bottom: 0; - left: 0; - z-index: 1; /* 设置更高的 z-index 值 */ - margin-bottom: -4px !important; - align-self: flex-end; -} -#chat_box { - display: flex; - flex-direction: column; - overflow-y: visible !important; - z-index: 3; - flex-grow: 1; /* 自动填充剩余空间 */ - position: absolute; - bottom: 0; - left: 0; - width: 100%; - margin-bottom: 30px !important; - border: 1px solid var(--border-color-primary); -} -.toast-body { - z-index: 5 !important; -} -.chat_input { - -} -.sm_btn { - position: relative; - bottom: 5px; 
- height: 10%; - border-radius: 20px!important; - min-width: min(10%,100%) !important; - overflow: hidden; -} -.sm_select { - position: relative !important; - z-index: 5 !important; - bottom: 5px; - min-width: min(20%,100%) !important; - border-radius: 20px!important; -} -.sm_checkbox { - position: relative !important; - z-index: 5 !important; - bottom: 5px; - padding: 0 !important; -} -.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { - padding: 0 !important; -} -.sm_select .block.svelte-mppz8v { - width: 10% !important; -} - -button.sm { - padding: 6px 8px !important; -} - -/* usage_display */ -.insert_block { - position: relative; - bottom: 2px; - min-width: min(55px,100%) !important; -} - -.submit_btn { - flex-direction: column-reverse; - overflow-y: auto !important; - position: absolute; - bottom: 0; - right: 10px; - margin-bottom: 10px !important; - min-width: min(50px,100%) !important; -} - -textarea { - resize: none; - height: 100%; /* 填充父元素的高度 */ -} -/* #main_chatbot { - height: 75vh !important; - max-height: 75vh !important; - overflow: auto !important; - z-index: 2; - transform: translateZ(0) !important; - backface-visibility: hidden !important; - will-change: transform !important; -} */ -#prompt_result{ - height: 60vh !important; - max-height: 60vh !important; -} - -#app_title { - font-weight: var(--prose-header-text-weight); - font-size: var(--text-xxl); - line-height: 1.3; - text-align: left; - margin-top: 6px; - white-space: nowrap; -} -#description { - text-align: center; - margin: 32px 0 4px 0; -} - -/* gradio的页脚信息 */ -footer { - /* display: none !important; */ - margin-top: .2em !important; - font-size: 85%; -} -#footer { - text-align: center; -} -#footer div { - display: inline-block; -} -#footer .versions{ - font-size: 85%; - opacity: 0.60; -} -/* user_info */ - -#float_display { - position: absolute; - max-height: 30px; -} -/* user_info */ -#user_info { - white-space: nowrap; - position: absolute; left: 8em; top: .2em; - z-index: var(--layer-2); - box-shadow: var(--block-shadow); - border: none; border-radius: var(--block-label-radius); - background: var(--color-accent); - padding: var(--block-label-padding); - font-size: var(--block-label-text-size); line-height: var(--line-sm); - width: auto; min-height: 30px !important; - opacity: 1; - transition: opacity 0.3s ease-in-out; -} -textarea.svelte-1pie7s6 { - background: #e7e6e6 !important; - width: 96% !important; -} - -.dark textarea.svelte-1pie7s6 { - background: var(--input-background-fill) !important; - width: 96% !important; -} - -.dark input[type=number].svelte-1cl284s { - background: #393939 !important; - border: var(--input-border-width) solid var(--input-border-color) !important; -} -/* .dark input[type="range"] { - background: #393939 !important; -} */ -#user_info .wrap { - opacity: 0; -} -#user_info p { - color: white; - font-weight: var(--block-label-text-weight); -} -#user_info.hideK { - opacity: 0; - transition: opacity 1s ease-in-out; -} -[class *= "message"] { - gap: 7px !important; - border-radius: var(--radius-xl) !important -} -/* debug_mes */ -#debug_mes { - min-height: 2em; - align-items: flex-end; - justify-content: flex-end; -} -#debug_mes p { - font-size: .85em; - font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace; - /* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */ - color: #000000; -} -.dark #debug_mes p { - color: #ee65ed; -} - -#debug_mes { - transition: all 0.6s; -} 
-#gpt-chatbot { - transition: height 0.3s ease; -} - -/* .wrap.svelte-18telvq.svelte-18telvq { - padding: var(--block-padding) !important; - height: 100% !important; - max-height: 95% !important; - overflow-y: auto !important; -}*/ -.app.svelte-1mya07g.svelte-1mya07g { - max-width: 100%; - position: relative; - padding: var(--size-4); - width: 100%; - height: 100%; -} - -.gradio-container-3-32-2 h1 { - font-weight: 700 !important; - font-size: 28px !important; -} - - -.gradio-container-3-32-2 h2 { - font-weight: 600 !important; - font-size: 24px !important; -} -.gradio-container-3-32-2 h3 { - font-weight: 500 !important; - font-size: 20px !important; -} -.gradio-container-3-32-2 h4 { - font-weight: 400 !important; - font-size: 16px !important; -} -.gradio-container-3-32-2 h5 { - font-weight: 300 !important; - font-size: 14px !important; -} -.gradio-container-3-32-2 h6 { - font-weight: 200 !important; - font-size: 12px !important; -} - - -#usage_display p, #usage_display span { - margin: 0; - font-size: .85em; - color: var(--body-text-color-subdued); -} -.progress-bar { - background-color: var(--input-background-fill);; - margin: .5em 0 !important; - height: 20px; - border-radius: 10px; - overflow: hidden; -} -.progress { - background-color: var(--block-title-background-fill); - height: 100%; - border-radius: 10px; - text-align: right; - transition: width 0.5s ease-in-out; -} -.progress-text { - /* color: white; */ - color: var(--color-accent) !important; - font-size: 1em !important; - font-weight: bold; - padding-right: 10px; - line-height: 20px; -} - -.apSwitch { - top: 2px; - display: inline-block; - height: 24px; - position: relative; - width: 48px; - border-radius: 12px; -} -.apSwitch input { - display: none !important; -} -.apSlider { - background-color: var(--neutral-200); - bottom: 0; - cursor: pointer; - left: 0; - position: absolute; - right: 0; - top: 0; - transition: .4s; - font-size: 18px; - border-radius: 7px; -} -.apSlider::before { - bottom: -1.5px; - left: 1px; - position: absolute; - transition: .4s; - content: "🌞"; -} -hr.append-display { - margin: 8px 0; - border: none; - height: 1px; - border-top-width: 0; - background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1)); -} -.source-a { - font-size: 0.8em; - max-width: 100%; - margin: 0; - display: flex; - flex-direction: row; - flex-wrap: wrap; - align-items: center; - /* background-color: #dddddd88; */ - border-radius: 1.5rem; - padding: 0.2em; -} -.source-a a { - display: inline-block; - background-color: #aaaaaa50; - border-radius: 1rem; - padding: 0.5em; - text-align: center; - text-overflow: ellipsis; - overflow: hidden; - min-width: 20%; - white-space: nowrap; - margin: 0.2rem 0.1rem; - text-decoration: none !important; - flex: 1; - transition: flex 0.5s; -} -.source-a a:hover { - background-color: #aaaaaa20; - flex: 2; -} -input:checked + .apSlider { - background-color: var(--primary-600); -} -input:checked + .apSlider::before { - transform: translateX(23px); - content:"🌚"; -} - -/* Override Slider Styles (for webkit browsers like Safari and Chrome) - * 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410 - * 进度滑块在各个平台还是太不统一了 - */ -input[type="range"] { - -webkit-appearance: none; - height: 4px; - background: var(--input-background-fill); - border-radius: 5px; - background-image: linear-gradient(var(--primary-500),var(--primary-500)); - background-size: 0% 100%; - background-repeat: no-repeat; -} -input[type="range"]::-webkit-slider-thumb { - 
-webkit-appearance: none; - height: 20px; - width: 20px; - border-radius: 50%; - border: solid 0.5px #ddd; - background-color: white; - cursor: ew-resize; - box-shadow: var(--input-shadow); - transition: background-color .1s ease; -} -input[type="range"]::-webkit-slider-thumb:hover { - background: var(--neutral-50); -} -input[type="range"]::-webkit-slider-runnable-track { - -webkit-appearance: none; - box-shadow: none; - border: none; - background: transparent; -} - -.submit_btn, #cancel_btn { - height: 42px !important; -} -.submit_btn::before { - content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} - -#cancel_btn::before { - content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 
C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色(默认) */ -#gpt-chatbot { - background-color: var(--chatbot-background-color-light) !important; - color: var(--chatbot-color-light) !important; - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.06); -} -/* 暗色 */ -.dark #gpt-chatbot { - background-color: var(--block-background-fill) !important; - color: var(--chatbot-color-dark) !important; - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.2); -} - -#gpt-panel > div { - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.06); -} -.dark #gpt-panel > div { - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.2); -} - -/* 屏幕宽度大于等于500px的设备 */ -/* update on 2023.4.8: 高度的细致调整已写入JavaScript */ -/* @media screen and (min-width: 500px) { - #main_chatbot { - height: calc(100vh - 200px); - } - #main_chatbot .wrap { - max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } -} */ -/* 屏幕宽度小于500px的设备 */ -/* @media screen and (max-width: 499px) { - #main_chatbot { - height: calc(100vh - 140px); - } - #main_chatbot .wrap { - max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } - [data-testid = "bot"] { - max-width: 95% !important; - } - #app_title h1{ - letter-spacing: -1px; font-size: 22px; - } -} */ -#gpt-chatbot .wrap { - overflow-x: hidden -} -/* 对话气泡 */ -.message { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: 15px !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; - background-color: var(--message-bot-background-color-light) !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; - background-color: var(--message-user-background-color-light) !important; -} -.dark [data-testid = "bot"] { - background-color: var(--message-bot-background-color-dark) !important; -} -.dark [data-testid = "user"] { - background-color: var(--message-user-background-color-dark) !important; -} - -.message p { - margin-top: 0.6em !important; - margin-bottom: 0.6em 
!important; -} -.message p:first-child { margin-top: 0 !important; } -.message p:last-of-type { margin-bottom: 0 !important; } - -.message .md-message { - display: block; - padding: 0 !important; -} -.message .raw-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} -.raw-message.hideM, .md-message.hideM { - display: none; -} - -/* custom buttons */ -.chuanhu-btn { - border-radius: 5px; - /* background-color: #E6E6E6 !important; */ - color: rgba(120, 120, 120, 0.64) !important; - padding: 4px !important; - position: absolute; - right: -22px; - cursor: pointer !important; - transition: color .2s ease, background-color .2s ease; -} -.chuanhu-btn:hover { - background-color: rgba(167, 167, 167, 0.25) !important; - color: unset !important; -} -.chuanhu-btn:active { - background-color: rgba(167, 167, 167, 0.5) !important; -} -.chuanhu-btn:focus { - outline: none; -} -.copy-bot-btn { - /* top: 18px; */ - bottom: 0; -} -.toggle-md-btn { - /* top: 0; */ - bottom: 20px; -} -.copy-code-btn { - position: relative; - float: right; - font-size: 1em; - cursor: pointer; -} - -.message-wrap>div img{ - border-radius: 10px !important; -} - -/* history message */ -.wrap>.history-message { - padding: 10px !important; -} -.history-message { - /* padding: 0 !important; */ - opacity: 80%; - display: flex; - flex-direction: column; -} -.history-message>.history-message { - padding: 0 !important; -} -.history-message>.message-wrap { - padding: 0 !important; - margin-bottom: 16px; -} -.history-message>.message { - margin-bottom: 16px; -} -.wrap>.history-message::after { - content: ""; - display: block; - height: 2px; - background-color: var(--body-text-color-subdued); - margin-bottom: 10px; - margin-top: -10px; - clear: both; -} -.wrap>.history-message>:last-child::after { - content: "仅供查看"; - display: block; - text-align: center; - color: var(--body-text-color-subdued); - font-size: 0.8em; -} - -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -.message :not(pre) code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -.message pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 7%, 70%)!important; - border-radius: 10px; - padding: 1.2em 1em 0em .5em; - margin: 0.6em 2em 1em 0.2em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -.dark .message pre code { - background-color: hsla(0, 0%, 20%, 300%)!important; -} -.message pre { - padding: 0 !important; -} -.message pre code div.highlight { - background-color: unset !important; -} - -button.copy-button { - display: none; -} - -/* 代码高亮样式 */ -.codehilite .hll { background-color: #6e7681 } -.codehilite .c { color: #8b949e; font-style: italic } /* Comment */ -.codehilite .err { color: #f85149 } /* Error */ -.codehilite .esc { color: #c9d1d9 } /* Escape */ -.codehilite .g { color: #c9d1d9 } /* Generic */ -.codehilite .k { color: #ff7b72 } /* Keyword */ -.codehilite .l { color: #a5d6ff } /* Literal */ -.codehilite .n { color: #c9d1d9 } /* Name */ -.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */ -.codehilite .x { color: #c9d1d9 } /* Other */ -.codehilite .p { color: #c9d1d9 } /* Punctuation */ 
-.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */ -.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */ -.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */ -.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */ -.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */ -.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */ -.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */ -.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */ -.codehilite .gr { color: #ffa198 } /* Generic.Error */ -.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */ -.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */ -.codehilite .go { color: #8b949e } /* Generic.Output */ -.codehilite .gp { color: #8b949e } /* Generic.Prompt */ -.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */ -.codehilite .gu { color: #79c0ff } /* Generic.Subheading */ -.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */ -.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */ -.codehilite .kc { color: #79c0ff } /* Keyword.Constant */ -.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */ -.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */ -.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */ -.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */ -.codehilite .kt { color: #ff7b72 } /* Keyword.Type */ -.codehilite .ld { color: #79c0ff } /* Literal.Date */ -.codehilite .m { color: #a5d6ff } /* Literal.Number */ -.codehilite .s { color: #a5d6ff } /* Literal.String */ -.codehilite .na { color: #c9d1d9 } /* Name.Attribute */ -.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */ -.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */ -.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */ -.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */ -.codehilite .ni { color: #ffa657 } /* Name.Entity */ -.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */ -.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */ -.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */ -.codehilite .nn { color: #ff7b72 } /* Name.Namespace */ -.codehilite .nx { color: #c9d1d9 } /* Name.Other */ -.codehilite .py { color: #79c0ff } /* Name.Property */ -.codehilite .nt { color: #7ee787 } /* Name.Tag */ -.codehilite .nv { color: #79c0ff } /* Name.Variable */ -.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */ -.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */ -.codehilite .w { color: #6e7681 } /* Text.Whitespace */ -.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */ -.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */ -.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */ -.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */ -.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */ -.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */ -.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */ -.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */ -.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */ -.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */ -.codehilite .s2 { color: 
#a5d6ff } /* Literal.String.Double */ -.codehilite .se { color: #79c0ff } /* Literal.String.Escape */ -.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */ -.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */ -.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */ -.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */ -.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */ -.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */ -.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */ -.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */ -.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */ -.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */ -.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */ -.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */ -.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */ - -.dark .codehilite .hll { background-color: #2C3B41 } -.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */ -.dark .codehilite .err { color: #FF5370 } /* Error */ -.dark .codehilite .esc { color: #89DDFF } /* Escape */ -.dark .codehilite .g { color: #EEFFFF } /* Generic */ -.dark .codehilite .k { color: #BB80B3 } /* Keyword */ -.dark .codehilite .l { color: #C3E88D } /* Literal */ -.dark .codehilite .n { color: #EEFFFF } /* Name */ -.dark .codehilite .o { color: #89DDFF } /* Operator */ -.dark .codehilite .p { color: #89DDFF } /* Punctuation */ -.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */ -.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */ -.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */ -.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */ -.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */ -.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */ -.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */ -.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */ -.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */ -.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */ -.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */ -.dark .codehilite .go { color: #79d618 } /* Generic.Output */ -.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */ -.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */ -.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */ -.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */ -.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */ -.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */ -.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */ -.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */ -.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */ -.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */ -.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */ -.dark .codehilite .m { color: #F78C6C } /* Literal.Number */ -.dark .codehilite .s { color: #C3E88D } /* Literal.String */ -.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */ -.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */ -.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */ -.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */ -.dark .codehilite .nd { color: #82AAFF } 
/* Name.Decorator */ -.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */ -.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */ -.dark .codehilite .nf { color: #82AAFF } /* Name.Function */ -.dark .codehilite .nl { color: #82AAFF } /* Name.Label */ -.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */ -.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */ -.dark .codehilite .py { color: #FFCB6B } /* Name.Property */ -.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */ -.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */ -.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */ -.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */ -.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */ -.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */ -.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */ -.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */ -.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */ -.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */ -.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */ -.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */ -.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */ -.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */ -.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */ -.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */ -.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */ -.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */ -.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */ -.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */ -.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */ -.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */ -.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */ -.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */ -.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */ -.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */ -.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */ -.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */ -.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */ -.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */ diff --git a/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/streaming.py b/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/streaming.py deleted file mode 100644 index fdbdf5e90fc0c6560873d66bf273460b38e5ed7e..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/audiocraft/modules/streaming.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Streaming module API that should be implemented by all Streaming components, -""" - -from contextlib import contextmanager -import typing as tp -from torch import nn -import torch - - -State = tp.Dict[str, torch.Tensor] - - -class StreamingModule(nn.Module): - """Common API for streaming components. - - Each streaming component has a streaming state, which is just a dict[str, Tensor]. 
- By convention, the first dim of each tensor must be the batch size. - Don't use dots in the key names, as this would clash with submodules - (like in state_dict). - - If `self._is_streaming` is True, the component should use and remember - the proper state inside `self._streaming_state`. - - To set a streaming component in streaming state, use - - with module.streaming(): - ... - - This will automatically reset the streaming state when exiting the context manager. - This also automatically propagates to all streaming children module. - - Some module might also implement the `StreamingModule.flush` method, although - this one is trickier, as all parents module must be StreamingModule and implement - it as well for it to work properly. See `StreamingSequential` after. - """ - def __init__(self) -> None: - super().__init__() - self._streaming_state: State = {} - self._is_streaming = False - - def _apply_named_streaming(self, fn: tp.Any): - for name, module in self.named_modules(): - if isinstance(module, StreamingModule): - fn(name, module) - - def _set_streaming(self, streaming: bool): - def _set_streaming(name, module): - module._is_streaming = streaming - self._apply_named_streaming(_set_streaming) - - @contextmanager - def streaming(self): - """Context manager to enter streaming mode. Reset streaming state on exit. - """ - self._set_streaming(True) - try: - yield - finally: - self._set_streaming(False) - self.reset_streaming() - - def reset_streaming(self): - """Reset the streaming state. - """ - def _reset(name: str, module: StreamingModule): - module._streaming_state.clear() - - self._apply_named_streaming(_reset) - - def get_streaming_state(self) -> State: - """Return the streaming state, including that of sub-modules. - """ - state: State = {} - - def _add(name: str, module: StreamingModule): - if name: - name += "." - for key, value in module._streaming_state.items(): - state[name + key] = value - - self._apply_named_streaming(_add) - return state - - def set_streaming_state(self, state: State): - """Set the streaming state, including that of sub-modules. - """ - state = dict(state) - - def _set(name: str, module: StreamingModule): - if name: - name += "." - module._streaming_state.clear() - for key, value in list(state.items()): - # complexity is not ideal here, but probably fine. - if key.startswith(name): - local_key = key[len(name):] - if '.' not in local_key: - module._streaming_state[local_key] = value - del state[key] - - self._apply_named_streaming(_set) - assert len(state) == 0, list(state.keys()) - - def flush(self, x: tp.Optional[torch.Tensor] = None): - """Flush any remaining outputs that were waiting for completion. - Typically, for convolutions, this will add the final padding - and process the last buffer. - - This should take an optional argument `x`, which will be provided - if a module before this one in the streaming pipeline has already - spitted out a flushed out buffer. - """ - if x is None: - return None - else: - return self(x) - - -class StreamingSequential(StreamingModule, nn.Sequential): - """A streaming compatible alternative of `nn.Sequential`. 
- """ - def flush(self, x: tp.Optional[torch.Tensor] = None): - for module in self: - if isinstance(module, StreamingModule): - x = module.flush(x) - elif x is not None: - x = module(x) - return x diff --git a/spaces/MLVKU/Human_Object_Interaction/hotr/util/__init__.py b/spaces/MLVKU/Human_Object_Interaction/hotr/util/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MacYang/Diamond-Sutra/README.md b/spaces/MacYang/Diamond-Sutra/README.md deleted file mode 100644 index 5a1cb4d60524e25929d7fb462e2b8861e5705b66..0000000000000000000000000000000000000000 --- a/spaces/MacYang/Diamond-Sutra/README.md +++ /dev/null @@ -1,46 +0,0 @@ - ---- -title: Diamond Sutra -emoji: 🦀 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -# Diamond-Sutra - -A study project, it provides a chatGPT like experience for you to discuss the famous Diamon Sutra with Dzonsar Khyentse Rinpoche,he had a great dhamma talk for this sutra, the talk can be found in "Files and versions" with the file name of **jinggang.pdf**, and the discussion will be about his talk. - ---- -# About the UI -The UI is in Chinese, this is mostly because the talk here by Dzonsar Khyentse Rinpoche is all in Chinese. - -Though you could still ask questions in English, the answer would still be in Chinese. - ---- -# About the OpenAI API Key -As the program calls OpenAI to generate answers for your questions, you need to paste your OpenAI API Key to start a discussion, but don't worry, the program doesn't record your key, it's just used on the fly, this can be confirmed by checking the source code. - ---- -# Special Thanks -This is a study project, I'd say thanks to [LangChain](https://github.com/hwchase17/langchain), which really makes the development an easy work. - -I'd also say thanks to this [tutorial](https://blog.langchain.dev/tutorial-chatgpt-over-your-data/), -it not only tells you how, but more importantly, the why behind the code, if you want to know more details, this OpenAI's [cookbook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb) is also very helpful. - ---- -# Minor differences with the tutorial - -1. The project uses the gpt-3.5-turbo model, which make the generated answers more readable. - -2. When ingesting the talk document to a vector store, the project added some logic specific to the document, it makes the document splitting based on the natural paragraphs and results in better prompts. - -3. If you are interested in how the program works in detail, the program provides a command line script(cli_app.py), you can lunch it with the "--verbose" flag, you will get enough details. - -4. As this is a chat application, how to balance the length of chat history and the OpenAI context limitation is always an interesting problem to tackle(check [this](https://ai.stackexchange.com/questions/38150/how-does-chatgpt-retain-the-context-of-previous-questions) interesting discussion), while the tutorial condenses the chat history and the asked question together to a standalone question, this project further limits the chat history to 5 most recent Q&As to avoid the condensing work itself exceeds the OpenAI context limitation. - -5. 
To make the program generate more detailed answers, the program tuned the prompt template to ask OpenAI to provide more details, it also sets the "**max_tokens**" parameter to -1 when initializing the OpenAI wrapper class, this tells OpenAI to generate long answers as long as it doesn't exceed the context limitation. \ No newline at end of file diff --git a/spaces/Manmay/tortoise-tts/tortoise/utils/typical_sampling.py b/spaces/Manmay/tortoise-tts/tortoise/utils/typical_sampling.py deleted file mode 100644 index ff6bf487947e88a55fa45f2ffec1b9540df1d4fd..0000000000000000000000000000000000000000 --- a/spaces/Manmay/tortoise-tts/tortoise/utils/typical_sampling.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -from transformers import LogitsWarper - - -class TypicalLogitsWarper(LogitsWarper): - def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): - self.filter_value = filter_value - self.mass = mass - self.min_tokens_to_keep = min_tokens_to_keep - - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: - # calculate entropy - normalized = torch.nn.functional.log_softmax(scores, dim=-1) - p = torch.exp(normalized) - ent = -(normalized * p).nansum(-1, keepdim=True) - - # shift and sort - shifted_scores = torch.abs((-normalized) - ent) - sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) - sorted_logits = scores.gather(-1, sorted_indices) - cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) - - # Remove tokens with cumulative mass above the threshold - last_ind = (cumulative_probs < self.mass).sum(dim=1) - last_ind[last_ind < 0] = 0 - sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) - if self.min_tokens_to_keep > 1: - # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) - sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 - indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) - - scores = scores.masked_fill(indices_to_remove, self.filter_value) - return scores \ No newline at end of file diff --git a/spaces/MarkusDressel/cord/README.md b/spaces/MarkusDressel/cord/README.md deleted file mode 100644 index 8f063f837c556bb8cf37eac8182dcad2e32bc8cb..0000000000000000000000000000000000000000 --- a/spaces/MarkusDressel/cord/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Cord -emoji: 🚀 -colorFrom: yellow -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
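Editorial note: the `TypicalLogitsWarper` removed in the tortoise-tts diff above implements typical sampling — it keeps the tokens whose information content is closest to the step's entropy until the chosen probability mass is covered, and masks the rest. A minimal usage sketch (not part of the deleted file; it assumes the class above is importable) for a single decoding step could look like this:

```python
import torch

# Hypothetical sketch: apply the TypicalLogitsWarper defined above to one
# sampling step. The warper only looks at `scores`; `input_ids` is passed
# to satisfy the LogitsWarper call signature.
warper = TypicalLogitsWarper(mass=0.9)           # keep ~90% of the "typical" probability mass
scores = torch.randn(2, 50)                      # dummy (batch, vocab) logits for one step
input_ids = torch.zeros(2, 1, dtype=torch.long)  # unused by this warper
filtered = warper(input_ids, scores)             # atypical tokens are set to -inf
probs = torch.softmax(filtered, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
```

In a full generation loop the same call would simply be inserted before sampling, alongside any temperature or top-p filtering already in place.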
diff --git a/spaces/Mayank-02/Matching-job-descriptions-and-resumes/msmarco-distilbert-base-tas-b-final/README.md b/spaces/Mayank-02/Matching-job-descriptions-and-resumes/msmarco-distilbert-base-tas-b-final/README.md deleted file mode 100644 index 58f67b9f2c619981bb226521b0a2fb398ec7b558..0000000000000000000000000000000000000000 --- a/spaces/Mayank-02/Matching-job-descriptions-and-resumes/msmarco-distilbert-base-tas-b-final/README.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -pipeline_tag: sentence-similarity -tags: -- sentence-transformers -- feature-extraction -- sentence-similarity -- transformers - ---- - -# {MODEL_NAME} - -This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. - - - -## Usage (Sentence-Transformers) - -Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: - -``` -pip install -U sentence-transformers -``` - -Then you can use the model like this: - -```python -from sentence_transformers import SentenceTransformer -sentences = ["This is an example sentence", "Each sentence is converted"] - -model = SentenceTransformer('{MODEL_NAME}') -embeddings = model.encode(sentences) -print(embeddings) -``` - - - -## Usage (HuggingFace Transformers) -Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. - -```python -from transformers import AutoTokenizer, AutoModel -import torch - - -def cls_pooling(model_output, attention_mask): - return model_output[0][:,0] - - -# Sentences we want sentence embeddings for -sentences = ['This is an example sentence', 'Each sentence is converted'] - -# Load model from HuggingFace Hub -tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') -model = AutoModel.from_pretrained('{MODEL_NAME}') - -# Tokenize sentences -encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') - -# Compute token embeddings -with torch.no_grad(): - model_output = model(**encoded_input) - -# Perform pooling. In this case, cls pooling. 
-sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) - -print("Sentence embeddings:") -print(sentence_embeddings) -``` - - - -## Evaluation Results - - - -For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) - - -## Training -The model was trained with the parameters: - -**DataLoader**: - -`torch.utils.data.dataloader.DataLoader` of length 2378 with parameters: -``` -{'batch_size': 42, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} -``` - -**Loss**: - -`sentence_transformers.losses.MarginMSELoss.MarginMSELoss` - -Parameters of the fit()-Method: -``` -{ - "epochs": 3, - "evaluation_steps": 0, - "evaluator": "NoneType", - "max_grad_norm": 1, - "optimizer_class": "", - "optimizer_params": { - "lr": 2e-05 - }, - "scheduler": "WarmupLinear", - "steps_per_epoch": null, - "warmup_steps": 713, - "weight_decay": 0.01 -} -``` - - -## Full Model Architecture -``` -SentenceTransformer( - (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: DistilBertModel - (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) -) -``` - -## Citing & Authors - - \ No newline at end of file diff --git a/spaces/MoyerLiu/ChatGPT-Next-Web/README.md b/spaces/MoyerLiu/ChatGPT-Next-Web/README.md deleted file mode 100644 index da91c1035a3830fb3dbb11a374ff77c547ae82b5..0000000000000000000000000000000000000000 --- a/spaces/MoyerLiu/ChatGPT-Next-Web/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatGPT Next Web -emoji: 📊 -colorFrom: blue -colorTo: gray -sdk: docker -pinned: false -license: apache-2.0 -app_port: 3000 -duplicated_from: bwbmfya/ChatGPT-Next-Web ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/networks/transformer_encoder.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/networks/transformer_encoder.py deleted file mode 100644 index 7c6054ddcc242d5184c6e0e4dcd5102e6955b915..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/networks/transformer_encoder.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Transformer-based text encoder network.""" -# pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import tensorflow as tf - -from official.modeling import activations -from official.nlp.modeling import layers - - -@tf.keras.utils.register_keras_serializable(package='Text') -class TransformerEncoder(tf.keras.Model): - """Bi-directional Transformer-based encoder network. - - This network implements a bi-directional Transformer-based encoder as - described in "BERT: Pre-training of Deep Bidirectional Transformers for - Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the - embedding lookups and transformer layers, but not the masked language model - or classification task networks. - - The default values for this object are taken from the BERT-Base implementation - in "BERT: Pre-training of Deep Bidirectional Transformers for Language - Understanding". - - Arguments: - vocab_size: The size of the token vocabulary. - hidden_size: The size of the transformer hidden layers. - num_layers: The number of transformer layers. - num_attention_heads: The number of attention heads for each transformer. The - hidden size must be divisible by the number of attention heads. - sequence_length: The sequence length that this encoder expects. If None, the - sequence length is dynamic; if an integer, the encoder will require - sequences padded to this length. - max_sequence_length: The maximum sequence length that this encoder can - consume. If None, max_sequence_length uses the value from sequence length. - This determines the variable shape for positional embeddings. - type_vocab_size: The number of types that the 'type_ids' input can take. - intermediate_size: The intermediate size for the transformer layers. - activation: The activation to use for the transformer layers. - dropout_rate: The dropout rate to use for the transformer layers. - attention_dropout_rate: The dropout rate to use for the attention layers - within the transformer layers. - initializer: The initialzer to use for all weights in this encoder. - return_all_encoder_outputs: Whether to output sequence embedding outputs of - all encoder transformer layers. - output_range: The sequence output range, [0, output_range), by slicing the - target sequence of the last transformer layer. `None` means the entire - target sequence will attend to the source sequence, which yeilds the full - output. - embedding_width: The width of the word embeddings. If the embedding width - is not equal to hidden size, embedding parameters will be factorized into - two matrices in the shape of ['vocab_size', 'embedding_width'] and - ['embedding_width', 'hidden_size'] ('embedding_width' is usually much - smaller than 'hidden_size'). - embedding_layer: The word embedding layer. `None` means we will create a new - embedding layer. Otherwise, we will reuse the given embedding layer. This - parameter is originally added for ELECTRA model which needs to tie the - generator embeddings with the discriminator embeddings. 
- """ - - def __init__(self, - vocab_size, - hidden_size=768, - num_layers=12, - num_attention_heads=12, - sequence_length=512, - max_sequence_length=None, - type_vocab_size=16, - intermediate_size=3072, - activation=activations.gelu, - dropout_rate=0.1, - attention_dropout_rate=0.1, - initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), - return_all_encoder_outputs=False, - output_range=None, - embedding_width=None, - embedding_layer=None, - **kwargs): - activation = tf.keras.activations.get(activation) - initializer = tf.keras.initializers.get(initializer) - - if not max_sequence_length: - max_sequence_length = sequence_length - self._self_setattr_tracking = False - self._config_dict = { - 'vocab_size': vocab_size, - 'hidden_size': hidden_size, - 'num_layers': num_layers, - 'num_attention_heads': num_attention_heads, - 'sequence_length': sequence_length, - 'max_sequence_length': max_sequence_length, - 'type_vocab_size': type_vocab_size, - 'intermediate_size': intermediate_size, - 'activation': tf.keras.activations.serialize(activation), - 'dropout_rate': dropout_rate, - 'attention_dropout_rate': attention_dropout_rate, - 'initializer': tf.keras.initializers.serialize(initializer), - 'return_all_encoder_outputs': return_all_encoder_outputs, - 'output_range': output_range, - 'embedding_width': embedding_width, - } - - word_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') - mask = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_mask') - type_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') - - if embedding_width is None: - embedding_width = hidden_size - if embedding_layer is None: - self._embedding_layer = layers.OnDeviceEmbedding( - vocab_size=vocab_size, - embedding_width=embedding_width, - initializer=initializer, - name='word_embeddings') - else: - self._embedding_layer = embedding_layer - word_embeddings = self._embedding_layer(word_ids) - - # Always uses dynamic slicing for simplicity. - self._position_embedding_layer = layers.PositionEmbedding( - initializer=initializer, - use_dynamic_slicing=True, - max_sequence_length=max_sequence_length, - name='position_embedding') - position_embeddings = self._position_embedding_layer(word_embeddings) - self._type_embedding_layer = layers.OnDeviceEmbedding( - vocab_size=type_vocab_size, - embedding_width=embedding_width, - initializer=initializer, - use_one_hot=True, - name='type_embeddings') - type_embeddings = self._type_embedding_layer(type_ids) - - embeddings = tf.keras.layers.Add()( - [word_embeddings, position_embeddings, type_embeddings]) - - embeddings = ( - tf.keras.layers.LayerNormalization( - name='embeddings/layer_norm', - axis=-1, - epsilon=1e-12, - dtype=tf.float32)(embeddings)) - embeddings = ( - tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)) - - # We project the 'embedding' output to 'hidden_size' if it is not already - # 'hidden_size'. 
- if embedding_width != hidden_size: - self._embedding_projection = tf.keras.layers.experimental.EinsumDense( - '...x,xy->...y', - output_shape=hidden_size, - bias_axes='y', - kernel_initializer=initializer, - name='embedding_projection') - embeddings = self._embedding_projection(embeddings) - - self._transformer_layers = [] - data = embeddings - attention_mask = layers.SelfAttentionMask()([data, mask]) - encoder_outputs = [] - for i in range(num_layers): - if i == num_layers - 1 and output_range is not None: - transformer_output_range = output_range - else: - transformer_output_range = None - layer = layers.Transformer( - num_attention_heads=num_attention_heads, - intermediate_size=intermediate_size, - intermediate_activation=activation, - dropout_rate=dropout_rate, - attention_dropout_rate=attention_dropout_rate, - output_range=transformer_output_range, - kernel_initializer=initializer, - name='transformer/layer_%d' % i) - self._transformer_layers.append(layer) - data = layer([data, attention_mask]) - encoder_outputs.append(data) - - first_token_tensor = ( - tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))( - encoder_outputs[-1])) - self._pooler_layer = tf.keras.layers.Dense( - units=hidden_size, - activation='tanh', - kernel_initializer=initializer, - name='pooler_transform') - cls_output = self._pooler_layer(first_token_tensor) - - if return_all_encoder_outputs: - outputs = [encoder_outputs, cls_output] - else: - outputs = [encoder_outputs[-1], cls_output] - - super(TransformerEncoder, self).__init__( - inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs) - - def get_embedding_table(self): - return self._embedding_layer.embeddings - - def get_embedding_layer(self): - return self._embedding_layer - - def get_config(self): - return self._config_dict - - @property - def transformer_layers(self): - """List of Transformer layers in the encoder.""" - return self._transformer_layers - - @property - def pooler_layer(self): - """The pooler dense layer after the transformer layers.""" - return self._pooler_layer - - @classmethod - def from_config(cls, config, custom_objects=None): - return cls(**config) diff --git a/spaces/OAOA/DifFace/basicsr/models/video_recurrent_model.py b/spaces/OAOA/DifFace/basicsr/models/video_recurrent_model.py deleted file mode 100644 index 796ee57d5aeb84e81fe8dc769facc8339798cc3e..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/models/video_recurrent_model.py +++ /dev/null @@ -1,197 +0,0 @@ -import torch -from collections import Counter -from os import path as osp -from torch import distributed as dist -from tqdm import tqdm - -from basicsr.metrics import calculate_metric -from basicsr.utils import get_root_logger, imwrite, tensor2img -from basicsr.utils.dist_util import get_dist_info -from basicsr.utils.registry import MODEL_REGISTRY -from .video_base_model import VideoBaseModel - - -@MODEL_REGISTRY.register() -class VideoRecurrentModel(VideoBaseModel): - - def __init__(self, opt): - super(VideoRecurrentModel, self).__init__(opt) - if self.is_train: - self.fix_flow_iter = opt['train'].get('fix_flow') - - def setup_optimizers(self): - train_opt = self.opt['train'] - flow_lr_mul = train_opt.get('flow_lr_mul', 1) - logger = get_root_logger() - logger.info(f'Multiple the learning rate for flow network with {flow_lr_mul}.') - if flow_lr_mul == 1: - optim_params = self.net_g.parameters() - else: # separate flow params and normal params for different lr - normal_params = [] - flow_params = [] - for name, param in 
self.net_g.named_parameters(): - if 'spynet' in name: - flow_params.append(param) - else: - normal_params.append(param) - optim_params = [ - { # add normal params first - 'params': normal_params, - 'lr': train_opt['optim_g']['lr'] - }, - { - 'params': flow_params, - 'lr': train_opt['optim_g']['lr'] * flow_lr_mul - }, - ] - - optim_type = train_opt['optim_g'].pop('type') - self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g']) - self.optimizers.append(self.optimizer_g) - - def optimize_parameters(self, current_iter): - if self.fix_flow_iter: - logger = get_root_logger() - if current_iter == 1: - logger.info(f'Fix flow network and feature extractor for {self.fix_flow_iter} iters.') - for name, param in self.net_g.named_parameters(): - if 'spynet' in name or 'edvr' in name: - param.requires_grad_(False) - elif current_iter == self.fix_flow_iter: - logger.warning('Train all the parameters.') - self.net_g.requires_grad_(True) - - super(VideoRecurrentModel, self).optimize_parameters(current_iter) - - def dist_validation(self, dataloader, current_iter, tb_logger, save_img): - dataset = dataloader.dataset - dataset_name = dataset.opt['name'] - with_metrics = self.opt['val']['metrics'] is not None - # initialize self.metric_results - # It is a dict: { - # 'folder1': tensor (num_frame x len(metrics)), - # 'folder2': tensor (num_frame x len(metrics)) - # } - if with_metrics: - if not hasattr(self, 'metric_results'): # only execute in the first run - self.metric_results = {} - num_frame_each_folder = Counter(dataset.data_info['folder']) - for folder, num_frame in num_frame_each_folder.items(): - self.metric_results[folder] = torch.zeros( - num_frame, len(self.opt['val']['metrics']), dtype=torch.float32, device='cuda') - # initialize the best metric results - self._initialize_best_metric_results(dataset_name) - # zero self.metric_results - rank, world_size = get_dist_info() - if with_metrics: - for _, tensor in self.metric_results.items(): - tensor.zero_() - - metric_data = dict() - num_folders = len(dataset) - num_pad = (world_size - (num_folders % world_size)) % world_size - if rank == 0: - pbar = tqdm(total=len(dataset), unit='folder') - # Will evaluate (num_folders + num_pad) times, but only the first num_folders results will be recorded. 
- # (To avoid wait-dead) - for i in range(rank, num_folders + num_pad, world_size): - idx = min(i, num_folders - 1) - val_data = dataset[idx] - folder = val_data['folder'] - - # compute outputs - val_data['lq'].unsqueeze_(0) - val_data['gt'].unsqueeze_(0) - self.feed_data(val_data) - val_data['lq'].squeeze_(0) - val_data['gt'].squeeze_(0) - - self.test() - visuals = self.get_current_visuals() - - # tentative for out of GPU memory - del self.lq - del self.output - if 'gt' in visuals: - del self.gt - torch.cuda.empty_cache() - - if self.center_frame_only: - visuals['result'] = visuals['result'].unsqueeze(1) - if 'gt' in visuals: - visuals['gt'] = visuals['gt'].unsqueeze(1) - - # evaluate - if i < num_folders: - for idx in range(visuals['result'].size(1)): - result = visuals['result'][0, idx, :, :, :] - result_img = tensor2img([result]) # uint8, bgr - metric_data['img'] = result_img - if 'gt' in visuals: - gt = visuals['gt'][0, idx, :, :, :] - gt_img = tensor2img([gt]) # uint8, bgr - metric_data['img2'] = gt_img - - if save_img: - if self.opt['is_train']: - raise NotImplementedError('saving image is not supported during training.') - else: - if self.center_frame_only: # vimeo-90k - clip_ = val_data['lq_path'].split('/')[-3] - seq_ = val_data['lq_path'].split('/')[-2] - name_ = f'{clip_}_{seq_}' - img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder, - f"{name_}_{self.opt['name']}.png") - else: # others - img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder, - f"{idx:08d}_{self.opt['name']}.png") - # image name only for REDS dataset - imwrite(result_img, img_path) - - # calculate metrics - if with_metrics: - for metric_idx, opt_ in enumerate(self.opt['val']['metrics'].values()): - result = calculate_metric(metric_data, opt_) - self.metric_results[folder][idx, metric_idx] += result - - # progress bar - if rank == 0: - for _ in range(world_size): - pbar.update(1) - pbar.set_description(f'Folder: {folder}') - - if rank == 0: - pbar.close() - - if with_metrics: - if self.opt['dist']: - # collect data among GPUs - for _, tensor in self.metric_results.items(): - dist.reduce(tensor, 0) - dist.barrier() - - if rank == 0: - self._log_validation_metric_values(current_iter, dataset_name, tb_logger) - - def test(self): - n = self.lq.size(1) - self.net_g.eval() - - flip_seq = self.opt['val'].get('flip_seq', False) - self.center_frame_only = self.opt['val'].get('center_frame_only', False) - - if flip_seq: - self.lq = torch.cat([self.lq, self.lq.flip(1)], dim=1) - - with torch.no_grad(): - self.output = self.net_g(self.lq) - - if flip_seq: - output_1 = self.output[:, :n, :, :, :] - output_2 = self.output[:, n:, :, :, :].flip(1) - self.output = 0.5 * (output_1 + output_2) - - if self.center_frame_only: - self.output = self.output[:, n // 2, :, :, :] - - self.net_g.train() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/byte_level_bpe/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/byte_level_bpe/README.md deleted file mode 100644 index 657092660eae42d20f67647417623b8b8cb7b66c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/byte_level_bpe/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Neural Machine Translation with Byte-Level Subwords - -https://arxiv.org/abs/1909.03341 - -We provide an implementation of byte-level byte-pair encoding (BBPE), taking IWSLT 2017 Fr-En translation as -example. 
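Editorial note: as a rough illustration of the byte-level idea behind BBPE (a sketch under simplifying assumptions, not the fairseq implementation), text is first mapped to its UTF-8 bytes, so the base vocabulary is always 256 symbols and no input can be out-of-vocabulary; ordinary BPE merges are then learned over these byte tokens rather than over characters:

```python
# Sketch only: represent text as UTF-8 byte tokens; BPE merges would then
# operate on these tokens instead of on characters.
def to_byte_tokens(text: str):
    return [f"<{b:02x}>" for b in text.encode("utf-8")]

def from_byte_tokens(tokens) -> str:
    return bytes(int(t[1:-1], 16) for t in tokens).decode("utf-8", errors="replace")

print(to_byte_tokens("héllo"))                    # ['<68>', '<c3>', '<a9>', '<6c>', '<6c>', '<6f>']
print(from_byte_tokens(to_byte_tokens("héllo")))  # héllo
```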
- -## Data -Get data and generate fairseq binary dataset: -```bash -bash ./get_data.sh -``` - -## Model Training -Train Transformer model with Bi-GRU embedding contextualization (implemented in `gru_transformer.py`): -```bash -# VOCAB=bytes -# VOCAB=chars -VOCAB=bbpe2048 -# VOCAB=bpe2048 -# VOCAB=bbpe4096 -# VOCAB=bpe4096 -# VOCAB=bpe16384 -``` -```bash -fairseq-train "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \ - --arch gru_transformer --encoder-layers 2 --decoder-layers 2 --dropout 0.3 --share-all-embeddings \ - --optimizer adam --adam-betas '(0.9, 0.98)' \ - --lr 5e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --log-format 'simple' --log-interval 100 --save-dir "checkpoints/${VOCAB}" \ - --batch-size 100 --max-update 100000 --update-freq 2 -``` - -## Generation -`fairseq-generate` requires bytes (BBPE) decoder to convert byte-level representation back to characters: -```bash -# BPE=--bpe bytes -# BPE=--bpe characters -BPE=--bpe byte_bpe --sentencepiece-model-path data/spm_bbpe2048.model -# BPE=--bpe sentencepiece --sentencepiece-model data/spm_bpe2048.model -# BPE=--bpe byte_bpe --sentencepiece-model-path data/spm_bbpe4096.model -# BPE=--bpe sentencepiece --sentencepiece-model data/spm_bpe4096.model -# BPE=--bpe sentencepiece --sentencepiece-model data/spm_bpe16384.model -``` - -```bash -fairseq-generate "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \ - --source-lang fr --gen-subset test --sacrebleu --path "checkpoints/${VOCAB}/checkpoint_last.pt" \ - --tokenizer moses --moses-target-lang en ${BPE} -``` -When using `fairseq-interactive`, bytes (BBPE) encoder/decoder is required to tokenize input data and detokenize model predictions: -```bash -fairseq-interactive "data/bin_${VOCAB}" --task translation --user-dir examples/byte_level_bpe/gru_transformer \ - --path "checkpoints/${VOCAB}/checkpoint_last.pt" --input data/test.fr --tokenizer moses --moses-source-lang fr \ - --moses-target-lang en ${BPE} --buffer-size 1000 --max-tokens 10000 -``` - -## Results -| Vocabulary | Model | BLEU | -|:-------------:|:-------------:|:-------------:| -| Joint BPE 16k ([Kudo, 2018](https://arxiv.org/abs/1804.10959)) | 512d LSTM 2+2 | 33.81 | -| Joint BPE 16k | Transformer base 2+2 (w/ GRU) | 36.64 (36.72) | -| Joint BPE 4k | Transformer base 2+2 (w/ GRU) | 35.49 (36.10) | -| Joint BBPE 4k | Transformer base 2+2 (w/ GRU) | 35.61 (35.82) | -| Joint BPE 2k | Transformer base 2+2 (w/ GRU) | 34.87 (36.13) | -| Joint BBPE 2k | Transformer base 2+2 (w/ GRU) | 34.98 (35.43) | -| Characters | Transformer base 2+2 (w/ GRU) | 31.78 (33.30) | -| Bytes | Transformer base 2+2 (w/ GRU) | 31.57 (33.62) | - - -## Citation -``` -@misc{wang2019neural, - title={Neural Machine Translation with Byte-Level Subwords}, - author={Changhan Wang and Kyunghyun Cho and Jiatao Gu}, - year={2019}, - eprint={1909.03341}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -``` - - -## Contact -Changhan Wang ([changhan@fb.com](mailto:changhan@fb.com)), -Kyunghyun Cho ([kyunghyuncho@fb.com](mailto:kyunghyuncho@fb.com)), -Jiatao Gu ([jgu@fb.com](mailto:jgu@fb.com)) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git 
a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/logging/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/average_checkpoints.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/average_checkpoints.py deleted file mode 100644 index c512f802bce6b3395cc42a0e4eb39181e9f8c873..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/average_checkpoints.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import collections -import os -import re - -import torch -from fairseq.file_io import PathManager - - -def average_checkpoints(inputs): - """Loads checkpoints from inputs and returns a model with averaged weights. - - Args: - inputs: An iterable of string paths of checkpoints to load from. - - Returns: - A dict of string keys mapping to various values. The 'model' key - from the returned dict should correspond to an OrderedDict mapping - string parameter names to torch Tensors. - """ - params_dict = collections.OrderedDict() - params_keys = None - new_state = None - num_models = len(inputs) - - for fpath in inputs: - with PathManager.open(fpath, "rb") as f: - state = torch.load( - f, - map_location=( - lambda s, _: torch.serialization.default_restore_location(s, "cpu") - ), - ) - # Copies over the settings from the first checkpoint - if new_state is None: - new_state = state - - model_params = state["model"] - - model_params_keys = list(model_params.keys()) - if params_keys is None: - params_keys = model_params_keys - elif params_keys != model_params_keys: - raise KeyError( - "For checkpoint {}, expected list of params: {}, " - "but found: {}".format(f, params_keys, model_params_keys) - ) - - for k in params_keys: - p = model_params[k] - if isinstance(p, torch.HalfTensor): - p = p.float() - if k not in params_dict: - params_dict[k] = p.clone() - # NOTE: clone() is needed in case of p is a shared parameter - else: - params_dict[k] += p - - averaged_params = collections.OrderedDict() - for k, v in params_dict.items(): - averaged_params[k] = v - if averaged_params[k].is_floating_point(): - averaged_params[k].div_(num_models) - else: - averaged_params[k] //= num_models - new_state["model"] = averaged_params - return new_state - - -def last_n_checkpoints(paths, n, update_based, upper_bound=None): - assert len(paths) == 1 - path = paths[0] - if update_based: - pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") - else: - pt_regexp = re.compile(r"checkpoint(\d+)\.pt") - files = PathManager.ls(path) - - entries = [] - for f in files: - m = pt_regexp.fullmatch(f) - if m is not None: - sort_key = int(m.group(1)) - if upper_bound is None or sort_key <= upper_bound: - entries.append((sort_key, m.group(0))) - if len(entries) < n: - raise Exception( - "Found {} checkpoint files but need at least {}", len(entries), n - ) - return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] - - -def main(): - parser = argparse.ArgumentParser( - description="Tool to average the params of input checkpoints to " - "produce a new checkpoint", - ) - # fmt: off - parser.add_argument('--inputs', required=True, nargs='+', - help='Input 
checkpoint file paths.') - parser.add_argument('--output', required=True, metavar='FILE', - help='Write the new checkpoint containing the averaged weights to this path.') - num_group = parser.add_mutually_exclusive_group() - num_group.add_argument('--num-epoch-checkpoints', type=int, - help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, ' - 'and average last this many of them.') - num_group.add_argument('--num-update-checkpoints', type=int, - help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, ' - 'and average last this many of them.') - parser.add_argument('--checkpoint-upper-bound', type=int, - help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' - 'when using --num-update-checkpoints, this will set an upper bound on which update to use' - 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.' - 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500' - ) - # fmt: on - args = parser.parse_args() - print(args) - - num = None - is_update_based = False - if args.num_update_checkpoints is not None: - num = args.num_update_checkpoints - is_update_based = True - elif args.num_epoch_checkpoints is not None: - num = args.num_epoch_checkpoints - - assert args.checkpoint_upper_bound is None or ( - args.num_epoch_checkpoints is not None - or args.num_update_checkpoints is not None - ), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints" - assert ( - args.num_epoch_checkpoints is None or args.num_update_checkpoints is None - ), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints" - - if num is not None: - args.inputs = last_n_checkpoints( - args.inputs, - num, - is_update_based, - upper_bound=args.checkpoint_upper_bound, - ) - print("averaging checkpoints: ", args.inputs) - - new_state = average_checkpoints(args.inputs) - with PathManager.open(args.output, "wb") as f: - torch.save(new_state, f) - print("Finished writing averaged checkpoint to {}".format(args.output)) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/multihead_attention.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/multihead_attention.py deleted file mode 100644 index a2516356117847b0d46d965ee942354a2ed23189..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/multihead_attention.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Dict, Optional, Tuple - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise -from torch import Tensor, nn -from torch.nn import Parameter - - -@with_incremental_state -class MultiheadAttention(nn.Module): - """Multi-headed attention. - - See "Attention Is All You Need" for more details. 
- """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - q_noise=0.0, - qn_block_size=8, - ): - super().__init__() - self.embed_dim = embed_dim - self.kdim = kdim if kdim is not None else embed_dim - self.vdim = vdim if vdim is not None else embed_dim - self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim - - self.num_heads = num_heads - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - - self.head_dim = embed_dim // num_heads - assert ( - self.head_dim * num_heads == self.embed_dim - ), "embed_dim must be divisible by num_heads" - self.scaling = self.head_dim ** -0.5 - - self.self_attention = self_attention - self.encoder_decoder_attention = encoder_decoder_attention - - assert not self.self_attention or self.qkv_same_dim, ( - "Self-attention requires query, key and " "value to be of the same size" - ) - - self.k_proj = quant_noise( - nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size - ) - self.v_proj = quant_noise( - nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size - ) - self.q_proj = quant_noise( - nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size - ) - - self.out_proj = quant_noise( - nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size - ) - - if add_bias_kv: - self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) - self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) - else: - self.bias_k = self.bias_v = None - - self.add_zero_attn = add_zero_attn - - self.reset_parameters() - - self.onnx_trace = False - - def prepare_for_onnx_export_(self): - self.onnx_trace = True - - def reset_parameters(self): - if self.qkv_same_dim: - # Empirically observed the convergence to be much better with - # the scaled initialization - nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) - nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) - nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) - else: - nn.init.xavier_uniform_(self.k_proj.weight) - nn.init.xavier_uniform_(self.v_proj.weight) - nn.init.xavier_uniform_(self.q_proj.weight) - - nn.init.xavier_uniform_(self.out_proj.weight) - if self.out_proj.bias is not None: - nn.init.constant_(self.out_proj.bias, 0.0) - if self.bias_k is not None: - nn.init.xavier_normal_(self.bias_k) - if self.bias_v is not None: - nn.init.xavier_normal_(self.bias_v) - - def forward( - self, - query, - key: Optional[Tensor], - value: Optional[Tensor], - key_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - need_weights: bool = True, - static_kv: bool = False, - attn_mask: Optional[Tensor] = None, - before_softmax: bool = False, - need_head_weights: bool = False, - ) -> Tuple[Tensor, Optional[Tensor]]: - """Input shape: Time x Batch x Channel - - Args: - key_padding_mask (ByteTensor, optional): mask to exclude - keys that are pads, of shape `(batch, src_len)`, where - padding elements are indicated by 1s. - need_weights (bool, optional): return the attention weights, - averaged over heads (default: False). - attn_mask (ByteTensor, optional): typically used to - implement causal attention, where the mask prevents the - attention from looking forward in time (default: None). 
- before_softmax (bool, optional): return the raw attention - weights and values before the attention softmax. - need_head_weights (bool, optional): return the attention - weights for each head. Implies *need_weights*. Default: - return the average attention weights over all heads. - """ - if need_head_weights: - need_weights = True - - is_tpu = query.device.type == "xla" - - tgt_len, bsz, embed_dim = query.size() - src_len = tgt_len - assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" - assert list(query.size()) == [tgt_len, bsz, embed_dim] - if key is not None: - src_len, key_bsz, _ = key.size() - if not torch.jit.is_scripting(): - assert key_bsz == bsz - assert value is not None - assert src_len, bsz == value.shape[:2] - - if ( - not self.onnx_trace - and not is_tpu # don't use PyTorch version on TPUs - and incremental_state is None - and not static_kv - # A workaround for quantization to work. Otherwise JIT compilation - # treats bias in linear module as method. - and not torch.jit.is_scripting() - ): - assert key is not None and value is not None - return F.multi_head_attention_forward( - query, - key, - value, - self.embed_dim, - self.num_heads, - torch.empty([0]), - torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), - self.bias_k, - self.bias_v, - self.add_zero_attn, - self.dropout_module.p, - self.out_proj.weight, - self.out_proj.bias, - self.training or self.dropout_module.apply_during_inference, - key_padding_mask, - need_weights, - attn_mask, - use_separate_proj_weight=True, - q_proj_weight=self.q_proj.weight, - k_proj_weight=self.k_proj.weight, - v_proj_weight=self.v_proj.weight, - ) - - if incremental_state is not None: - saved_state = self._get_input_buffer(incremental_state) - if saved_state is not None and "prev_key" in saved_state: - # previous time steps are cached - no need to recompute - # key and value if they are static - if static_kv: - assert self.encoder_decoder_attention and not self.self_attention - key = value = None - else: - saved_state = None - - if self.self_attention: - q = self.q_proj(query) - k = self.k_proj(query) - v = self.v_proj(query) - elif self.encoder_decoder_attention: - # encoder-decoder attention - q = self.q_proj(query) - if key is None: - assert value is None - k = v = None - else: - k = self.k_proj(key) - v = self.v_proj(key) - - else: - assert key is not None and value is not None - q = self.q_proj(query) - k = self.k_proj(key) - v = self.v_proj(value) - q *= self.scaling - - if self.bias_k is not None: - assert self.bias_v is not None - k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) - v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) - if attn_mask is not None: - attn_mask = torch.cat( - [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 - ) - if key_padding_mask is not None: - key_padding_mask = torch.cat( - [ - key_padding_mask, - key_padding_mask.new_zeros(key_padding_mask.size(0), 1), - ], - dim=1, - ) - - q = ( - q.contiguous() - .view(tgt_len, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - if k is not None: - k = ( - k.contiguous() - .view(-1, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - if v is not None: - v = ( - v.contiguous() - .view(-1, bsz * self.num_heads, self.head_dim) - .transpose(0, 1) - ) - - if saved_state is not None: - # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) - if "prev_key" in saved_state: - _prev_key = saved_state["prev_key"] - assert _prev_key is not None - prev_key = _prev_key.view(bsz * 
self.num_heads, -1, self.head_dim) - if static_kv: - k = prev_key - else: - assert k is not None - k = torch.cat([prev_key, k], dim=1) - src_len = k.size(1) - if "prev_value" in saved_state: - _prev_value = saved_state["prev_value"] - assert _prev_value is not None - prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) - if static_kv: - v = prev_value - else: - assert v is not None - v = torch.cat([prev_value, v], dim=1) - prev_key_padding_mask: Optional[Tensor] = None - if "prev_key_padding_mask" in saved_state: - prev_key_padding_mask = saved_state["prev_key_padding_mask"] - assert k is not None and v is not None - key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( - key_padding_mask=key_padding_mask, - prev_key_padding_mask=prev_key_padding_mask, - batch_size=bsz, - src_len=k.size(1), - static_kv=static_kv, - ) - - saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) - saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) - saved_state["prev_key_padding_mask"] = key_padding_mask - # In this branch incremental_state is never None - assert incremental_state is not None - incremental_state = self._set_input_buffer(incremental_state, saved_state) - assert k is not None - assert k.size(1) == src_len - - # This is part of a workaround to get around fork/join parallelism - # not supporting Optional types. - if key_padding_mask is not None and key_padding_mask.dim() == 0: - key_padding_mask = None - - if key_padding_mask is not None: - assert key_padding_mask.size(0) == bsz - assert key_padding_mask.size(1) == src_len - - if self.add_zero_attn: - assert v is not None - src_len += 1 - k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) - v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) - if attn_mask is not None: - attn_mask = torch.cat( - [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 - ) - if key_padding_mask is not None: - key_padding_mask = torch.cat( - [ - key_padding_mask, - torch.zeros(key_padding_mask.size(0), 1).type_as( - key_padding_mask - ), - ], - dim=1, - ) - - attn_weights = torch.bmm(q, k.transpose(1, 2)) - attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) - - assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] - - if attn_mask is not None: - attn_mask = attn_mask.unsqueeze(0) - if self.onnx_trace: - attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) - attn_weights += attn_mask - - if key_padding_mask is not None: - # don't attend to padding symbols - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - if not is_tpu: - attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), - float("-inf"), - ) - else: - attn_weights = attn_weights.transpose(0, 2) - attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) - attn_weights = attn_weights.transpose(0, 2) - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if before_softmax: - return attn_weights, v - - attn_weights_float = utils.softmax( - attn_weights, dim=-1, onnx_trace=self.onnx_trace - ) - attn_weights = attn_weights_float.type_as(attn_weights) - attn_probs = self.dropout_module(attn_weights) - - assert v is not None - attn = torch.bmm(attn_probs, v) - assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] - if self.onnx_trace and attn.size(1) == 1: - # when ONNX tracing a single decoder step (sequence length == 1) - 
# the transpose is a no-op copy before view, thus unnecessary - attn = attn.contiguous().view(tgt_len, bsz, embed_dim) - else: - attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) - attn = self.out_proj(attn) - attn_weights: Optional[Tensor] = None - if need_weights: - attn_weights = attn_weights_float.view( - bsz, self.num_heads, tgt_len, src_len - ).transpose(1, 0) - if not need_head_weights: - # average attention weights over heads - attn_weights = attn_weights.mean(dim=0) - - return attn, attn_weights - - @staticmethod - def _append_prev_key_padding_mask( - key_padding_mask: Optional[Tensor], - prev_key_padding_mask: Optional[Tensor], - batch_size: int, - src_len: int, - static_kv: bool, - ) -> Optional[Tensor]: - # saved key padding masks have shape (bsz, seq_len) - if prev_key_padding_mask is not None and static_kv: - new_key_padding_mask = prev_key_padding_mask - elif prev_key_padding_mask is not None and key_padding_mask is not None: - new_key_padding_mask = torch.cat( - [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 - ) - # During incremental decoding, as the padding token enters and - # leaves the frame, there will be a time when prev or current - # is None - elif prev_key_padding_mask is not None: - if src_len > prev_key_padding_mask.size(1): - filler = torch.zeros( - (batch_size, src_len - prev_key_padding_mask.size(1)), - device=prev_key_padding_mask.device, - ) - new_key_padding_mask = torch.cat( - [prev_key_padding_mask.float(), filler.float()], dim=1 - ) - else: - new_key_padding_mask = prev_key_padding_mask.float() - elif key_padding_mask is not None: - if src_len > key_padding_mask.size(1): - filler = torch.zeros( - (batch_size, src_len - key_padding_mask.size(1)), - device=key_padding_mask.device, - ) - new_key_padding_mask = torch.cat( - [filler.float(), key_padding_mask.float()], dim=1 - ) - else: - new_key_padding_mask = key_padding_mask.float() - else: - new_key_padding_mask = prev_key_padding_mask - return new_key_padding_mask - - @torch.jit.export - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - new_order: Tensor, - ): - """Reorder buffered internal state (for incremental generation).""" - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - for k in input_buffer.keys(): - input_buffer_k = input_buffer[k] - if input_buffer_k is not None: - if self.encoder_decoder_attention and input_buffer_k.size( - 0 - ) == new_order.size(0): - break - input_buffer[k] = input_buffer_k.index_select(0, new_order) - incremental_state = self._set_input_buffer(incremental_state, input_buffer) - return incremental_state - - def _get_input_buffer( - self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] - ) -> Dict[str, Optional[Tensor]]: - result = self.get_incremental_state(incremental_state, "attn_state") - if result is not None: - return result - else: - empty_result: Dict[str, Optional[Tensor]] = {} - return empty_result - - def _set_input_buffer( - self, - incremental_state: Dict[str, Dict[str, Optional[Tensor]]], - buffer: Dict[str, Optional[Tensor]], - ): - return self.set_incremental_state(incremental_state, "attn_state", buffer) - - def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): - return attn_weights - - def upgrade_state_dict_named(self, state_dict, name): - prefix = name + "." 
if name != "" else "" - items_to_add = {} - keys_to_remove = [] - for k in state_dict.keys(): - if k.endswith(prefix + "in_proj_weight"): - # in_proj_weight used to be q + k + v with same dimensions - dim = int(state_dict[k].shape[0] / 3) - items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] - items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] - items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] - - keys_to_remove.append(k) - - k_bias = prefix + "in_proj_bias" - if k_bias in state_dict.keys(): - dim = int(state_dict[k].shape[0] / 3) - items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] - items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ - dim : 2 * dim - ] - items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] - - keys_to_remove.append(prefix + "in_proj_bias") - - for k in keys_to_remove: - del state_dict[k] - - for key, value in items_to_add.items(): - state_dict[key] = value diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/bart/README.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/bart/README.md deleted file mode 100644 index 4050a724ee6a2f20c9998a95df48c58b64764ab1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/bart/README.md +++ /dev/null @@ -1,228 +0,0 @@ -# BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension - -[https://arxiv.org/abs/1910.13461](https://arxiv.org/abs/1910.13461) - -## Introduction - -BART is sequence-to-sequence model trained with denoising as pretraining objective. We show that this pretraining objective is more generic and show that we can match [RoBERTa](../roberta) results on SQuAD and GLUE and gain state-of-the-art results on summarization (XSum, CNN dataset), long form generative question answering (ELI5) and dialog response genration (ConvAI2). See the associated paper for more details. 
- -## Pre-trained models - -Model | Description | # params | Download ----|---|---|--- -`bart.base` | BART model with 6 encoder and decoder layers | 140M | [bart.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz) -`bart.large` | BART model with 12 encoder and decoder layers | 400M | [bart.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz) -`bart.large.mnli` | `bart.large` finetuned on `MNLI` | 400M | [bart.large.mnli.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz) -`bart.large.cnn` | `bart.large` finetuned on `CNN-DM` | 400M | [bart.large.cnn.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz) -`bart.large.xsum` | `bart.large` finetuned on `Xsum` | 400M | [bart.large.xsum.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz) - -## Results - -**[GLUE (Wang et al., 2019)](https://gluebenchmark.com/)** -_(dev set, single model, single-task finetuning)_ - -Model | MNLI | QNLI | QQP | RTE | SST-2 | MRPC | CoLA | STS-B ----|---|---|---|---|---|---|---|--- -`roberta.large` | 90.2 | 94.7 | 92.2 | 86.6 | 96.4 | 90.9 | 68.0 | 92.4 -`bart.large` | 89.9 | 94.9 | 92.5 | 87.0 | 96.6 | 90.4 | 62.8 | 91.2 - -**[SQuAD (Rajpurkar et al., 2018)](https://rajpurkar.github.io/SQuAD-explorer/)** -_(dev set, no additional data used)_ - -Model | SQuAD 1.1 EM/F1 | SQuAD 2.0 EM/F1 ----|---|--- -`roberta.large` | 88.9/94.6 | 86.5/89.4 -`bart.large` | 88.8/94.6 | 86.1/89.2 - -**[CNN/Daily Mail](http://nlpprogress.com/english/summarization.html)** -_(test set, no additional data used)_ - -Model | R1 | R2 | RL ----|---|---|--- -`BERTSUMEXTABS` | 42.13 | 19.60 | 39.18 -`bart.large` | 44.16 | 21.28 | 40.90 - -## Example usage - -##### Load BART from torch.hub (PyTorch >= 1.1): -```python -import torch -bart = torch.hub.load('pytorch/fairseq', 'bart.large') -bart.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Load BART (for PyTorch 1.0 or custom models): -```python -# Download bart.large model -wget https://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz -tar -xzvf bart.large.tar.gz - -# Load the model in fairseq -from fairseq.models.bart import BARTModel -bart = BARTModel.from_pretrained('/path/to/bart.large', checkpoint_file='model.pt') -bart.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Apply Byte-Pair Encoding (BPE) to input text: -```python -tokens = bart.encode('Hello world!') -assert tokens.tolist() == [0, 31414, 232, 328, 2] -bart.decode(tokens) # 'Hello world!' 
-``` - -##### Extract features from BART: -```python -# Extract the last layer's features -last_layer_features = bart.extract_features(tokens) -assert last_layer_features.size() == torch.Size([1, 5, 1024]) - -# Extract all layer's features from decoder (layer 0 is the embedding layer) -all_layers = bart.extract_features(tokens, return_all_hiddens=True) -assert len(all_layers) == 13 -assert torch.all(all_layers[-1] == last_layer_features) -``` - -##### Use BART for sentence-pair classification tasks: -```python -# Download BART already finetuned for MNLI -bart = torch.hub.load('pytorch/fairseq', 'bart.large.mnli') -bart.eval() # disable dropout for evaluation - -# Encode a pair of sentences and make a prediction -tokens = bart.encode('BART is a seq2seq model.', 'BART is not sequence to sequence.') -bart.predict('mnli', tokens).argmax() # 0: contradiction - -# Encode another pair of sentences -tokens = bart.encode('BART is denoising autoencoder.', 'BART is version of autoencoder.') -bart.predict('mnli', tokens).argmax() # 2: entailment -``` - -##### Register a new (randomly initialized) classification head: -```python -bart.register_classification_head('new_task', num_classes=3) -logprobs = bart.predict('new_task', tokens) -``` - -##### Batched prediction: -```python -import torch -from fairseq.data.data_utils import collate_tokens - -bart = torch.hub.load('pytorch/fairseq', 'bart.large.mnli') -bart.eval() - -batch_of_pairs = [ - ['BART is a seq2seq model.', 'BART is not sequence to sequence.'], - ['BART is denoising autoencoder.', 'BART is version of autoencoder.'], -] - -batch = collate_tokens( - [bart.encode(pair[0], pair[1]) for pair in batch_of_pairs], pad_idx=1 -) - -logprobs = bart.predict('mnli', batch) -print(logprobs.argmax(dim=1)) -# tensor([0, 2]) -``` - -##### Using the GPU: -```python -bart.cuda() -bart.predict('new_task', tokens) -``` - -#### Filling masks: - -BART can be used to fill multiple `` tokens in the input. -```python -bart = torch.hub.load('pytorch/fairseq', 'bart.base') -bart.eval() -bart.fill_mask(['The cat on the .'], topk=3, beam=10) -# [[('The cat was on the ground.', tensor(-0.6183)), ('The cat was on the floor.', tensor(-0.6798)), ('The cat sleeps on the couch.', tensor(-0.6830))]] -``` - -Note that by default we enforce the output length to match the input length. -This can be disabled by setting ``match_source_len=False``: -``` -bart.fill_mask(['The cat on the .'], topk=3, beam=10, match_source_len=False) -# [[('The cat was on the ground.', tensor(-0.6185)), ('The cat was asleep on the couch.', tensor(-0.6276)), ('The cat was on the floor.', tensor(-0.6800))]] -``` - -Example code to fill masks for a batch of sentences using GPU -``` -bart.cuda() -bart.fill_mask(['The cat on the .', 'The dog on the .'], topk=3, beam=10) -# [[('The cat was on the ground.', tensor(-0.6183)), ('The cat was on the floor.', tensor(-0.6798)), ('The cat sleeps on the couch.', tensor(-0.6830))], [('The dog was on the ground.', tensor(-0.6190)), ('The dog lay on the ground.', tensor(-0.6711)), -('The dog was asleep on the couch', tensor(-0.6796))]] -``` - -#### Evaluating the `bart.large.mnli` model: - -Example python code snippet to evaluate accuracy on the MNLI `dev_matched` set. 
-```python -label_map = {0: 'contradiction', 1: 'neutral', 2: 'entailment'} -ncorrect, nsamples = 0, 0 -bart.cuda() -bart.eval() -with open('glue_data/MNLI/dev_matched.tsv') as fin: - fin.readline() - for index, line in enumerate(fin): - tokens = line.strip().split('\t') - sent1, sent2, target = tokens[8], tokens[9], tokens[-1] - tokens = bart.encode(sent1, sent2) - prediction = bart.predict('mnli', tokens).argmax().item() - prediction_label = label_map[prediction] - ncorrect += int(prediction_label == target) - nsamples += 1 - print('| Accuracy: ', float(ncorrect)/float(nsamples)) -# Expected output: 0.9010 -``` - -#### Evaluating the `bart.large.cnn` model: -- Follow instructions [here](https://github.com/abisee/cnn-dailymail) to download and process into data-files such that `test.source` and `test.target` has one line for each non-tokenized sample. -- For simpler preprocessing, you can also `wget https://cdn-datasets.huggingface.co/summarization/cnn_dm_v2.tgz`, although there is no guarantee of identical scores -- `huggingface/transformers` has a simpler interface that supports [single-gpu](https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/run_eval.py) and [multi-gpu](https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/run_distributed_eval.py) beam search. - In `huggingface/transformers`, the BART models' paths are `facebook/bart-large-cnn` and `facebook/bart-large-xsum`. - -In `fairseq`, summaries can be generated using: - -```bash -cp data-bin/cnn_dm/dict.source.txt checkpoints/ -python examples/bart/summarize.py \ - --model-dir pytorch/fairseq \ - --model-file bart.large.cnn \ - --src cnn_dm/test.source \ - --out cnn_dm/test.hypo -``` - -For calculating rouge, install `files2rouge` from [here](https://github.com/pltrdy/files2rouge). - -```bash -export CLASSPATH=/path/to/stanford-corenlp-full-2016-10-31/stanford-corenlp-3.7.0.jar - -# Tokenize hypothesis and target files. -cat test.hypo | java edu.stanford.nlp.process.PTBTokenizer -ioFileList -preserveLines > test.hypo.tokenized -cat test.target | java edu.stanford.nlp.process.PTBTokenizer -ioFileList -preserveLines > test.hypo.target -files2rouge test.hypo.tokenized test.hypo.target -# Expected output: (ROUGE-2 Average_F: 0.21238) -``` - - -## Finetuning - -- [Finetuning on GLUE](README.glue.md) -- [Finetuning on CNN-DM](README.summarization.md) - -## Citation - -```bibtex -@article{lewis2019bart, - title = {BART: Denoising Sequence-to-Sequence Pre-training for Natural -Language Generation, Translation, and Comprehension}, - author = {Mike Lewis and Yinhan Liu and Naman Goyal and Marjan Ghazvininejad and - Abdelrahman Mohamed and Omer Levy and Veselin Stoyanov - and Luke Zettlemoyer }, - journal={arXiv preprint arXiv:1910.13461}, - year = {2019}, -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/config/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/config/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/config/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
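As a usage note for the BART README above: its MNLI evaluation loop scores one sentence pair at a time, while batched prediction with `collate_tokens` (also shown in that README) is usually much faster. A minimal sketch combining the two; the batch size is an assumption rather than something from the README:

```python
import torch
from fairseq.data.data_utils import collate_tokens

bart = torch.hub.load('pytorch/fairseq', 'bart.large.mnli')
bart.cuda()
bart.eval()

label_map = {0: 'contradiction', 1: 'neutral', 2: 'entailment'}
batch_size = 32  # assumed value; tune for available GPU memory

# Read all (premise, hypothesis, gold label) triples from the GLUE TSV.
pairs, targets = [], []
with open('glue_data/MNLI/dev_matched.tsv') as fin:
    fin.readline()  # skip the header row
    for line in fin:
        cols = line.strip().split('\t')
        pairs.append((cols[8], cols[9]))
        targets.append(cols[-1])

# Score padded mini-batches instead of single pairs.
ncorrect = 0
for i in range(0, len(pairs), batch_size):
    chunk = pairs[i:i + batch_size]
    batch = collate_tokens([bart.encode(s1, s2) for s1, s2 in chunk], pad_idx=1)
    preds = bart.predict('mnli', batch).argmax(dim=1).tolist()
    ncorrect += sum(label_map[p] == t for p, t in zip(preds, targets[i:i + batch_size]))
print('| Accuracy: ', ncorrect / len(targets))
```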
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/model_parallel/models/transformer_lm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/model_parallel/models/transformer_lm.py deleted file mode 100644 index dc52f6e8dd3899b6bf9bebae7415cee20baf9884..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/model_parallel/models/transformer_lm.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.nn as nn -from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder -from fairseq.models import register_model, register_model_architecture -from fairseq.models.transformer_lm import TransformerLanguageModel - - -try: - from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding - - has_megatron_submodule = True -except (ImportError, ModuleNotFoundError): - has_megatron_submodule = False - - -DEFAULT_MAX_TARGET_POSITIONS = 1024 - - -@register_model("model_parallel_transformer_lm") -class ModelParallelTransformerLanguageModel(TransformerLanguageModel): - - @staticmethod - def add_args(parser): - TransformerLanguageModel.add_args(parser) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - if not has_megatron_submodule: - raise ImportError( - "\n\nPlease install the megatron submodule:" - "\n\n git submodule update --init " - "fairseq/model_parallel/megatron" - ) - - # make sure all arguments are present in older models - base_lm_architecture(args) - - task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8) - task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8) - - if args.decoder_layers_to_keep: - args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) - - if getattr(args, "max_target_positions", None) is None: - args.max_target_positions = getattr( - args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS - ) - - if args.character_embeddings: - raise NotImplementedError( - "Character embeddings is not supported for model parallel" - ) - elif args.adaptive_input: - raise NotImplementedError( - "Adaptive input is not supported for model parallel" - ) - else: - embed_tokens = cls.build_embedding( - args, task.source_dictionary, args.decoder_input_dim - ) - - decoder = ModelParallelTransformerDecoder( - args, - task.target_dictionary, - embed_tokens, - no_encoder_attn=True, - ) - return cls(decoder) - - @staticmethod - def add_args(parser): - TransformerLanguageModel.add_args(parser) - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - def _vocab_init(tensor, **kwargs): - nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5) - nn.init.constant_(tensor[1], 0) - - embed_tokens = VocabParallelEmbedding( - len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init - ) - return embed_tokens - - -def base_lm_architecture(args): - # backward compatibility for older model checkpoints - if hasattr(args, "no_tie_adaptive_proj"): - # previous models defined --no-tie-adaptive-proj, so use the existence of - # that option to determine if this is an "old" model checkpoint - args.no_decoder_final_norm = True # old models always set this to True - if args.no_tie_adaptive_proj is False: - args.tie_adaptive_proj = True - if hasattr(args, "decoder_final_norm"): - args.no_decoder_final_norm = not args.decoder_final_norm - - args.activation_fn = getattr(args, 
"activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.relu_dropout = getattr(args, "relu_dropout", 0.0) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - # Model training is not stable without this - args.decoder_normalize_before = True - args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.character_embeddings = getattr(args, "character_embeddings", False) - args.character_filters = getattr( - args, - "character_filters", - "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", - ) - args.character_embedding_dim = getattr(args, "character_embedding_dim", 4) - args.char_embedder_highway_layers = getattr(args, "char_embedder_highway_layers", 2) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) - args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) - args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) - args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) - args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) - args.layernorm_embedding = getattr(args, "layernorm_embedding", False) - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0) - args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) - args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0.0) - args.add_bos_token = getattr(args, "add_bos_token", False) - - -@register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron") -def transformer_lm_megatron(args): - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 4) - args.decoder_layers = getattr(args, "decoder_layers", 72) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - args.activation_fn = getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture( - "model_parallel_transformer_lm", "transformer_lm_megatron_11b" -) -def transformer_lm_megatron_11b(args): - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 3072) - 
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072 * 6) - args.decoder_layers = getattr(args, "decoder_layers", 72) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 32) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", 0.1) - args.activation_fn = getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_pretraining.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_pretraining.py deleted file mode 100644 index cc310088db8852e80cd2e65d51f06f8f7cb592e3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/audio_pretraining.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -import os -import sys - -from argparse import Namespace -from dataclasses import dataclass, field -from typing import Optional -from omegaconf import MISSING, II, OmegaConf - -from fairseq.data import BinarizedAudioDataset, FileAudioDataset -from fairseq.dataclass import FairseqDataclass, ChoiceEnum -from fairseq.data.text_compressor import TextCompressionLevel - -from . import FairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -@dataclass -class InferredW2vConfig: - # The following are needed to precompute mask and mask channel indices - # before model's forward. - mask_length: Optional[int] = II("model.mask_length") - mask_prob: Optional[float] = II("model.mask_prob") - mask_selection: Optional[str] = II("model.mask_selection") - mask_other: Optional[float] = II("model.mask_other") - no_mask_overlap: Optional[bool] = II("model.no_mask_overlap") - mask_min_space: Optional[int] = II("model.mask_min_space") - mask_channel_length: Optional[int] = II("model.mask_channel_length") - mask_channel_prob: Optional[float] = II("model.mask_channel_prob") - mask_channel_selection: Optional[str] = II("model.mask_channel_selection") - mask_channel_other: Optional[float] = II("model.mask_channel_other") - no_mask_channel_overlap: Optional[bool] = II("model.no_mask_channel_overlap") - mask_channel_min_space: Optional[int] = II("model.mask_channel_min_space") - - conv_feature_layers: Optional[str] = II("model.conv_feature_layers") - encoder_embed_dim: Optional[int] = II("model.encoder_embed_dim") - - -@dataclass -class AudioPretrainingConfig(FairseqDataclass): - data: str = field(default=MISSING, metadata={"help": "path to data directory"}) - labels: Optional[str] = field( - default=None, - metadata={ - "help": "extension of the label file to load, used for fine-tuning"}, - ) - binarized_dataset: bool = field( - default=False, - metadata={ - "help": "if true, loads binarized dataset (useful for very large datasets). " - "See examples/wav2vec/scripts/binarize_manifest.sh" - }, - ) - sample_rate: int = field( - default=16_000, - metadata={ - "help": "target sample rate. 
audio files will be up/down sampled to this rate" - }, - ) - normalize: bool = field( - default=False, - metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, - ) - enable_padding: bool = field( - default=False, metadata={"help": "pad shorter samples instead of cropping"} - ) - max_sample_size: Optional[int] = field( - default=None, metadata={"help": "max sample size to crop to for batching"} - ) - min_sample_size: Optional[int] = field( - default=None, metadata={"help": "min sample size to skip small examples"} - ) - num_batch_buckets: int = field( - default=0, - metadata={"help": "number of buckets"}, - ) - precompute_mask_indices: bool = field( - default=False, - metadata={ - "help": "flag to compute mask indices in data preparation.", - }, - ) - - inferred_w2v_config: Optional[InferredW2vConfig] = field( - default=None, - metadata={ - "help": "wav2vec 2.0 masking arguments used to pre-compute masks (required for TPU)", - }, - ) - - tpu: bool = II("common.tpu") - text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field( - default="none", - metadata={ - "help": "compression level for texts (e.g. audio filenames, " - "target texts): none/low/high (default: none). " - } - ) - - -@register_task("audio_pretraining", dataclass=AudioPretrainingConfig) -class AudioPretrainingTask(FairseqTask): - """ """ - - cfg: AudioPretrainingConfig - - @classmethod - def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs): - """Setup the task (e.g., load dictionaries). - - Args: - cfg (AudioPretrainingConfig): configuration of this task - """ - - return cls(cfg) - - def _get_mask_precompute_kwargs(self, cfg): - if self.cfg.precompute_mask_indices or self.cfg.tpu: - assert ( - cfg.inferred_w2v_config is not None - ), "inferred_w2v_config must be set" - return OmegaConf.to_container( - cfg.inferred_w2v_config, resolve=True, enum_to_str=True - ) - else: - return {} - - def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs): - data_path = self.cfg.data - task_cfg = task_cfg or self.cfg - - # upgrade old task - if isinstance(task_cfg, Namespace): - if not hasattr(task_cfg, "autoregressive"): - task_cfg.autoregressive = not task_cfg.criterion == "ctc" - - text_compression_level = getattr( - TextCompressionLevel, str(self.cfg.text_compression_level) - ) - if getattr(task_cfg, "binarized_dataset", False): - self.datasets[split] = BinarizedAudioDataset( - data_path, - split=split, - sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), - max_sample_size=self.cfg.max_sample_size, - min_sample_size=self.cfg.min_sample_size, - pad=task_cfg.labels is not None or task_cfg.enable_padding, - normalize=task_cfg.normalize, - num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), - compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), - **self._get_mask_precompute_kwargs(task_cfg), - ) - else: - manifest_path = os.path.join(data_path, "{}.tsv".format(split)) - - self.datasets[split] = FileAudioDataset( - manifest_path=manifest_path, - sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate), - max_sample_size=self.cfg.max_sample_size, - min_sample_size=self.cfg.min_sample_size, - pad=task_cfg.labels is not None or task_cfg.enable_padding, - normalize=task_cfg.normalize, - num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu), - compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu), - text_compression_level=text_compression_level, - **self._get_mask_precompute_kwargs(task_cfg), - ) - - 
if self.cfg.tpu and task_cfg.inferred_w2v_config.mask_channel_prob == 0.0: - logger.info( - "Pretraining on TPUs may suffer convergence " - "issues when training with `mask_channel_prob` value of " - "0. You may want to set this to a low value close to 0." - ) - - @property - def source_dictionary(self): - return None - - @property - def target_dictionary(self): - return None - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return sys.maxsize, sys.maxsize - - def build_model(self, model_cfg: FairseqDataclass): - model = super().build_model(model_cfg) - - actualized_cfg = getattr(model, "cfg", None) - if actualized_cfg is not None: - # if "w2v_args" in actualized_cfg: - if hasattr(actualized_cfg, "w2v_args"): - model_cfg.w2v_args = actualized_cfg.w2v_args - - return model diff --git a/spaces/Olivier-Truong/XTTS_V1_CPU_working/app.py b/spaces/Olivier-Truong/XTTS_V1_CPU_working/app.py deleted file mode 100644 index eefe1bb0828c30efd6d373d4844500e6c9698550..0000000000000000000000000000000000000000 --- a/spaces/Olivier-Truong/XTTS_V1_CPU_working/app.py +++ /dev/null @@ -1,258 +0,0 @@ -import sys -import os -# By using XTTS you agree to CPML license https://coqui.ai/cpml -os.environ["COQUI_TOS_AGREED"] = "1" - -import gradio as gr -from TTS.api import TTS - -model_names = TTS().list_models() -m = model_names[0] -print(model_names) -tts = TTS(m, gpu=False) -tts.to("cpu") # no GPU or Amd -#tts.to("cuda") # cuda only - -def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree): - if agree == True: - if use_mic == True: - if mic_file_path is not None: - speaker_wav=mic_file_path - else: - gr.Warning("Please record your voice with Microphone, or uncheck Use Microphone to use reference audios") - return ( - None, - None, - ) - - else: - speaker_wav=audio_file_pth - - if len(prompt)<2: - gr.Warning("Please give a longer prompt text") - return ( - None, - None, - ) - if len(prompt)>10000: - gr.Warning("Text length limited to 10000 characters for this demo, please try shorter text") - return ( - None, - None, - ) - try: - if language == "fr": - if m.find("your") != -1: - language = "fr-fr" - if m.find("/fr/") != -1: - language = None - tts.tts_to_file( - text=prompt, - file_path="output.wav", - speaker_wav=speaker_wav, - language=language - ) - except RuntimeError as e : - if "device-assert" in str(e): - # cannot do anything on cuda device side error, need tor estart - gr.Warning("Unhandled Exception encounter, please retry in a minute") - print("Cuda device-assert Runtime encountered need restart") - sys.exit("Exit due to cuda device-assert") - else: - raise e - - return ( - gr.make_waveform( - audio="output.wav", - ), - "output.wav", - ) - else: - gr.Warning("Please accept the Terms & Condition!") - return ( - None, - None, - ) - - -title = "XTTS Glz's remake (Fonctional Text-2-Speech)" - -description = """ -XTTS is a Voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip. -
    -XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible. -
    -This is the same model that powers our creator application Coqui Studio as well as the Coqui API. In production we apply modifications to make low-latency streaming possible. -
    -Leave a star on the GitHub TTS repository, where our open-source inference and training code lives. -
    -

    For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings. -
    - -Duplicate Space -

    -""" - -article = """ -
    -

    By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml

    -
    -""" -examples = [ - [ - "Hello, World !, here is an example of light voice cloning. Try to upload your best audio samples quality", - "en", - "examples/female.wav", - None, - False, - True, - ], - [ - "Je suis un lycéen français de 17 ans, passioner par la Cyber-Sécuritée et les models d'IA.", - "fr", - "examples/male.wav", - None, - False, - True, - ], - [ - "Als ich sechs war, sah ich einmal ein wunderbares Bild", - "de", - "examples/female.wav", - None, - False, - True, - ], - [ - "Cuando tenía seis años, vi una vez una imagen magnífica", - "es", - "examples/male.wav", - None, - False, - True, - ], - [ - "Quando eu tinha seis anos eu vi, uma vez, uma imagem magnífica", - "pt", - "examples/female.wav", - None, - False, - True, - ], - [ - "Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek", - "pl", - "examples/male.wav", - None, - False, - True, - ], - [ - "Un tempo lontano, quando avevo sei anni, vidi un magnifico disegno", - "it", - "examples/female.wav", - None, - False, - True, - ], - [ - "Bir zamanlar, altı yaşındayken, muhteşem bir resim gördüm", - "tr", - "examples/female.wav", - None, - False, - True, - ], - [ - "Когда мне было шесть лет, я увидел однажды удивительную картинку", - "ru", - "examples/female.wav", - None, - False, - True, - ], - [ - "Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat", - "nl", - "examples/male.wav", - None, - False, - True, - ], - [ - "Když mi bylo šest let, viděl jsem jednou nádherný obrázek", - "cs", - "examples/female.wav", - None, - False, - True, - ], - [ - "当我还只有六岁的时候, 看到了一副精彩的插画", - "zh-cn", - "examples/female.wav", - None, - False, - True, - ], -] - - - -gr.Interface( - fn=predict, - inputs=[ - gr.Textbox( - label="Text Prompt", - info="One or two sentences at a time is better", - value="Hello, World !, here is an example of light voice cloning. 
Try to upload your best audio samples quality", - ), - gr.Dropdown( - label="Language", - info="Select an output language for the synthesised speech", - choices=[ - "en", - "es", - "fr", - "de", - "it", - "pt", - "pl", - "tr", - "ru", - "nl", - "cs", - "ar", - "zh-cn", - ], - max_choices=1, - value="en", - ), - gr.Audio( - label="Reference Audio", - info="Click on the ✎ button to upload your own target speaker audio", - type="filepath", - value="examples/female.wav", - ), - gr.Audio(source="microphone", - type="filepath", - info="Use your microphone to record audio", - label="Use Microphone for Reference"), - gr.Checkbox(label="Check to use Microphone as Reference", - value=False, - info="Notice: Microphone input may not work properly under traffic",), - gr.Checkbox( - label="Agree", - value=True, - info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml", - ), - ], - outputs=[ - gr.Video(label="Waveform Visual"), - gr.Audio(label="Synthesised Audio"), - ], - title=title, - description=description, - article=article, - examples=examples, -).queue().launch(debug=True) \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py deleted file mode 100644 index 597d791afab1bcc0013203a66c7fba225065eebe..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import functools -import json -import multiprocessing as mp -import numpy as np -import os -import time -from fvcore.common.download import download -from panopticapi.utils import rgb2id -from PIL import Image - -from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES - - -def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map): - panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32) - panoptic = rgb2id(panoptic) - output = np.zeros_like(panoptic, dtype=np.uint8) + 255 - for seg in segments: - cat_id = seg["category_id"] - new_cat_id = id_map[cat_id] - output[panoptic == seg["id"]] = new_cat_id - Image.fromarray(output).save(output_semantic) - - -def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories): - """ - Create semantic segmentation annotations from panoptic segmentation - annotations, to be used by PanopticFPN. - - It maps all thing categories to class 0, and maps all unlabeled pixels to class 255. - It maps all stuff categories to contiguous ids starting from 1. - - Args: - panoptic_json (str): path to the panoptic json file, in COCO's format. - panoptic_root (str): a directory with panoptic annotation files, in COCO's format. - sem_seg_root (str): a directory to output semantic annotation files - categories (list[dict]): category metadata. 
Each dict needs to have: - "id": corresponds to the "category_id" in the json annotations - "isthing": 0 or 1 - """ - os.makedirs(sem_seg_root, exist_ok=True) - - stuff_ids = [k["id"] for k in categories if k["isthing"] == 0] - thing_ids = [k["id"] for k in categories if k["isthing"] == 1] - id_map = {} # map from category id to id in the output semantic annotation - assert len(stuff_ids) <= 254 - for i, stuff_id in enumerate(stuff_ids): - id_map[stuff_id] = i + 1 - for thing_id in thing_ids: - id_map[thing_id] = 0 - id_map[0] = 255 - - with open(panoptic_json) as f: - obj = json.load(f) - - pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) - - def iter_annotations(): - for anno in obj["annotations"]: - file_name = anno["file_name"] - segments = anno["segments_info"] - input = os.path.join(panoptic_root, file_name) - output = os.path.join(sem_seg_root, file_name) - yield input, output, segments - - print("Start writing to {} ...".format(sem_seg_root)) - start = time.time() - pool.starmap( - functools.partial(_process_panoptic_to_semantic, id_map=id_map), - iter_annotations(), - chunksize=100, - ) - print("Finished. time: {:.2f}s".format(time.time() - start)) - - -if __name__ == "__main__": - dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco") - for s in ["val2017", "train2017"]: - separate_coco_semantic_from_panoptic( - os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)), - os.path.join(dataset_dir, "panoptic_{}".format(s)), - os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)), - COCO_CATEGORIES, - ) - - # Prepare val2017_100 for quick testing: - - dest_dir = os.path.join(dataset_dir, "annotations/") - URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir) - with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f: - obj = json.load(f) - - def link_val100(dir_full, dir_100): - print("Creating " + dir_100 + " ...") - os.makedirs(dir_100, exist_ok=True) - for img in obj["images"]: - basename = os.path.splitext(img["file_name"])[0] - src = os.path.join(dir_full, basename + ".png") - dst = os.path.join(dir_100, basename + ".png") - src = os.path.relpath(src, start=dir_100) - os.symlink(src, dst) - - link_val100( - os.path.join(dataset_dir, "panoptic_val2017"), - os.path.join(dataset_dir, "panoptic_val2017_100"), - ) - - link_val100( - os.path.join(dataset_dir, "panoptic_stuff_val2017"), - os.path.join(dataset_dir, "panoptic_stuff_val2017_100"), - ) diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/utils/data/dataloader.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/utils/data/dataloader.py deleted file mode 100644 index 039b9ec3645b2a4626ff47c221e372f32a6ad339..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/utils/data/dataloader.py +++ /dev/null @@ -1,425 +0,0 @@ -import torch -import torch.multiprocessing as multiprocessing -from torch._C import _set_worker_signal_handlers, \ - _remove_worker_pids, _error_if_any_worker_fails -try: - from torch._C import _set_worker_pids -except: - from torch._C import _update_worker_pids as _set_worker_pids -from .sampler import SequentialSampler, RandomSampler, BatchSampler -import signal -import collections -import re -import sys -import threading -import traceback -from torch._six import string_classes, int_classes -import numpy as np - -if sys.version_info[0] == 2: - import 
Queue as queue -else: - import queue - - -class ExceptionWrapper(object): - r"Wraps an exception plus traceback to communicate across threads" - - def __init__(self, exc_info): - self.exc_type = exc_info[0] - self.exc_msg = "".join(traceback.format_exception(*exc_info)) - - -_use_shared_memory = False -"""Whether to use shared memory in default_collate""" - - -def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id): - global _use_shared_memory - _use_shared_memory = True - - # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal - # module's handlers are executed after Python returns from C low-level - # handlers, likely when the same fatal signal happened again already. - # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1 - _set_worker_signal_handlers() - - torch.set_num_threads(1) - torch.manual_seed(seed) - np.random.seed(seed) - - if init_fn is not None: - init_fn(worker_id) - - while True: - r = index_queue.get() - if r is None: - break - idx, batch_indices = r - try: - samples = collate_fn([dataset[i] for i in batch_indices]) - except Exception: - data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - data_queue.put((idx, samples)) - - -def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id): - if pin_memory: - torch.cuda.set_device(device_id) - - while True: - try: - r = in_queue.get() - except Exception: - if done_event.is_set(): - return - raise - if r is None: - break - if isinstance(r[1], ExceptionWrapper): - out_queue.put(r) - continue - idx, batch = r - try: - if pin_memory: - batch = pin_memory_batch(batch) - except Exception: - out_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - out_queue.put((idx, batch)) - -numpy_type_map = { - 'float64': torch.DoubleTensor, - 'float32': torch.FloatTensor, - 'float16': torch.HalfTensor, - 'int64': torch.LongTensor, - 'int32': torch.IntTensor, - 'int16': torch.ShortTensor, - 'int8': torch.CharTensor, - 'uint8': torch.ByteTensor, -} - - -def default_collate(batch): - "Puts each data field into a tensor with outer dimension batch size" - - error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" - elem_type = type(batch[0]) - if torch.is_tensor(batch[0]): - out = None - if _use_shared_memory: - # If we're in a background process, concatenate directly into a - # shared memory tensor to avoid an extra copy - numel = sum([x.numel() for x in batch]) - storage = batch[0].storage()._new_shared(numel) - out = batch[0].new(storage) - return torch.stack(batch, 0, out=out) - elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ - and elem_type.__name__ != 'string_': - elem = batch[0] - if elem_type.__name__ == 'ndarray': - # array of string classes and object - if re.search('[SaUO]', elem.dtype.str) is not None: - raise TypeError(error_msg.format(elem.dtype)) - - return torch.stack([torch.from_numpy(b) for b in batch], 0) - if elem.shape == (): # scalars - py_type = float if elem.dtype.name.startswith('float') else int - return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) - elif isinstance(batch[0], int_classes): - return torch.LongTensor(batch) - elif isinstance(batch[0], float): - return torch.DoubleTensor(batch) - elif isinstance(batch[0], string_classes): - return batch - elif isinstance(batch[0], collections.Mapping): - return {key: default_collate([d[key] for d in batch]) for key in batch[0]} - elif isinstance(batch[0], collections.Sequence): - transposed = zip(*batch) - return 
[default_collate(samples) for samples in transposed] - - raise TypeError((error_msg.format(type(batch[0])))) - - -def pin_memory_batch(batch): - if torch.is_tensor(batch): - return batch.pin_memory() - elif isinstance(batch, string_classes): - return batch - elif isinstance(batch, collections.Mapping): - return {k: pin_memory_batch(sample) for k, sample in batch.items()} - elif isinstance(batch, collections.Sequence): - return [pin_memory_batch(sample) for sample in batch] - else: - return batch - - -_SIGCHLD_handler_set = False -"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one -handler needs to be set for all DataLoaders in a process.""" - - -def _set_SIGCHLD_handler(): - # Windows doesn't support SIGCHLD handler - if sys.platform == 'win32': - return - # can't set signal in child threads - if not isinstance(threading.current_thread(), threading._MainThread): - return - global _SIGCHLD_handler_set - if _SIGCHLD_handler_set: - return - previous_handler = signal.getsignal(signal.SIGCHLD) - if not callable(previous_handler): - previous_handler = None - - def handler(signum, frame): - # This following call uses `waitid` with WNOHANG from C side. Therefore, - # Python can still get and update the process status successfully. - _error_if_any_worker_fails() - if previous_handler is not None: - previous_handler(signum, frame) - - signal.signal(signal.SIGCHLD, handler) - _SIGCHLD_handler_set = True - - -class DataLoaderIter(object): - "Iterates once over the DataLoader's dataset, as specified by the sampler" - - def __init__(self, loader): - self.dataset = loader.dataset - self.collate_fn = loader.collate_fn - self.batch_sampler = loader.batch_sampler - self.num_workers = loader.num_workers - self.pin_memory = loader.pin_memory and torch.cuda.is_available() - self.timeout = loader.timeout - self.done_event = threading.Event() - - self.sample_iter = iter(self.batch_sampler) - - if self.num_workers > 0: - self.worker_init_fn = loader.worker_init_fn - self.index_queue = multiprocessing.SimpleQueue() - self.worker_result_queue = multiprocessing.SimpleQueue() - self.batches_outstanding = 0 - self.worker_pids_set = False - self.shutdown = False - self.send_idx = 0 - self.rcvd_idx = 0 - self.reorder_dict = {} - - base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0] - self.workers = [ - multiprocessing.Process( - target=_worker_loop, - args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn, - base_seed + i, self.worker_init_fn, i)) - for i in range(self.num_workers)] - - if self.pin_memory or self.timeout > 0: - self.data_queue = queue.Queue() - if self.pin_memory: - maybe_device_id = torch.cuda.current_device() - else: - # do not initialize cuda context if not necessary - maybe_device_id = None - self.worker_manager_thread = threading.Thread( - target=_worker_manager_loop, - args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, - maybe_device_id)) - self.worker_manager_thread.daemon = True - self.worker_manager_thread.start() - else: - self.data_queue = self.worker_result_queue - - for w in self.workers: - w.daemon = True # ensure that the worker exits on process exit - w.start() - - _set_worker_pids(id(self), tuple(w.pid for w in self.workers)) - _set_SIGCHLD_handler() - self.worker_pids_set = True - - # prime the prefetch loop - for _ in range(2 * self.num_workers): - self._put_indices() - - def __len__(self): - return len(self.batch_sampler) - - def _get_batch(self): - if self.timeout > 0: - try: - return 
self.data_queue.get(timeout=self.timeout) - except queue.Empty: - raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) - else: - return self.data_queue.get() - - def __next__(self): - if self.num_workers == 0: # same-process loading - indices = next(self.sample_iter) # may raise StopIteration - batch = self.collate_fn([self.dataset[i] for i in indices]) - if self.pin_memory: - batch = pin_memory_batch(batch) - return batch - - # check if the next sample has already been generated - if self.rcvd_idx in self.reorder_dict: - batch = self.reorder_dict.pop(self.rcvd_idx) - return self._process_next_batch(batch) - - if self.batches_outstanding == 0: - self._shutdown_workers() - raise StopIteration - - while True: - assert (not self.shutdown and self.batches_outstanding > 0) - idx, batch = self._get_batch() - self.batches_outstanding -= 1 - if idx != self.rcvd_idx: - # store out-of-order samples - self.reorder_dict[idx] = batch - continue - return self._process_next_batch(batch) - - next = __next__ # Python 2 compatibility - - def __iter__(self): - return self - - def _put_indices(self): - assert self.batches_outstanding < 2 * self.num_workers - indices = next(self.sample_iter, None) - if indices is None: - return - self.index_queue.put((self.send_idx, indices)) - self.batches_outstanding += 1 - self.send_idx += 1 - - def _process_next_batch(self, batch): - self.rcvd_idx += 1 - self._put_indices() - if isinstance(batch, ExceptionWrapper): - raise batch.exc_type(batch.exc_msg) - return batch - - def __getstate__(self): - # TODO: add limited pickling support for sharing an iterator - # across multiple threads for HOGWILD. - # Probably the best way to do this is by moving the sample pushing - # to a separate thread and then just sharing the data queue - # but signalling the end is tricky without a non-blocking API - raise NotImplementedError("DataLoaderIterator cannot be pickled") - - def _shutdown_workers(self): - try: - if not self.shutdown: - self.shutdown = True - self.done_event.set() - # if worker_manager_thread is waiting to put - while not self.data_queue.empty(): - self.data_queue.get() - for _ in self.workers: - self.index_queue.put(None) - # done_event should be sufficient to exit worker_manager_thread, - # but be safe here and put another None - self.worker_result_queue.put(None) - finally: - # removes pids no matter what - if self.worker_pids_set: - _remove_worker_pids(id(self)) - self.worker_pids_set = False - - def __del__(self): - if self.num_workers > 0: - self._shutdown_workers() - - -class DataLoader(object): - """ - Data loader. Combines a dataset and a sampler, and provides - single- or multi-process iterators over the dataset. - - Arguments: - dataset (Dataset): dataset from which to load the data. - batch_size (int, optional): how many samples per batch to load - (default: 1). - shuffle (bool, optional): set to ``True`` to have the data reshuffled - at every epoch (default: False). - sampler (Sampler, optional): defines the strategy to draw samples from - the dataset. If specified, ``shuffle`` must be False. - batch_sampler (Sampler, optional): like sampler, but returns a batch of - indices at a time. Mutually exclusive with batch_size, shuffle, - sampler, and drop_last. - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means that the data will be loaded in the main process. - (default: 0) - collate_fn (callable, optional): merges a list of samples to form a mini-batch. 
- pin_memory (bool, optional): If ``True``, the data loader will copy tensors - into CUDA pinned memory before returning them. - drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, - if the dataset size is not divisible by the batch size. If ``False`` and - the size of dataset is not divisible by the batch size, then the last batch - will be smaller. (default: False) - timeout (numeric, optional): if positive, the timeout value for collecting a batch - from workers. Should always be non-negative. (default: 0) - worker_init_fn (callable, optional): If not None, this will be called on each - worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as - input, after seeding and before data loading. (default: None) - - .. note:: By default, each worker will have its PyTorch seed set to - ``base_seed + worker_id``, where ``base_seed`` is a long generated - by main process using its RNG. You may use ``torch.initial_seed()`` to access - this value in :attr:`worker_init_fn`, which can be used to set other seeds - (e.g. NumPy) before data loading. - - .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an - unpicklable object, e.g., a lambda function. - """ - - def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, - num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False, - timeout=0, worker_init_fn=None): - self.dataset = dataset - self.batch_size = batch_size - self.num_workers = num_workers - self.collate_fn = collate_fn - self.pin_memory = pin_memory - self.drop_last = drop_last - self.timeout = timeout - self.worker_init_fn = worker_init_fn - - if timeout < 0: - raise ValueError('timeout option should be non-negative') - - if batch_sampler is not None: - if batch_size > 1 or shuffle or sampler is not None or drop_last: - raise ValueError('batch_sampler is mutually exclusive with ' - 'batch_size, shuffle, sampler, and drop_last') - - if sampler is not None and shuffle: - raise ValueError('sampler is mutually exclusive with shuffle') - - if self.num_workers < 0: - raise ValueError('num_workers cannot be negative; ' - 'use num_workers=0 to disable multiprocessing.') - - if batch_sampler is None: - if sampler is None: - if shuffle: - sampler = RandomSampler(dataset) - else: - sampler = SequentialSampler(dataset) - batch_sampler = BatchSampler(sampler, batch_size, drop_last) - - self.sampler = sampler - self.batch_sampler = batch_sampler - - def __iter__(self): - return DataLoaderIter(self) - - def __len__(self): - return len(self.batch_sampler) diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/base.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/base.py deleted file mode 100644 index a50c3fc7753a0bba64a5ab8c1ed64ff97e62313f..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/base.py +++ /dev/null @@ -1,80 +0,0 @@ -import abc -from typing import Tuple, List - -import torch -import torch.nn as nn - -from saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv -from saicinpainting.training.modules.multidilated_conv import MultidilatedConv - - -class BaseDiscriminator(nn.Module): - @abc.abstractmethod - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - """ - Predict scores and get intermediate activations. 
Useful for feature matching loss - :return tuple (scores, list of intermediate activations) - """ - raise NotImplemented() - - -def get_conv_block_ctor(kind='default'): - if not isinstance(kind, str): - return kind - if kind == 'default': - return nn.Conv2d - if kind == 'depthwise': - return DepthWiseSeperableConv - if kind == 'multidilated': - return MultidilatedConv - raise ValueError(f'Unknown convolutional block kind {kind}') - - -def get_norm_layer(kind='bn'): - if not isinstance(kind, str): - return kind - if kind == 'bn': - return nn.BatchNorm2d - if kind == 'in': - return nn.InstanceNorm2d - raise ValueError(f'Unknown norm block kind {kind}') - - -def get_activation(kind='tanh'): - if kind == 'tanh': - return nn.Tanh() - if kind == 'sigmoid': - return nn.Sigmoid() - if kind is False: - return nn.Identity() - raise ValueError(f'Unknown activation kind {kind}') - - -class SimpleMultiStepGenerator(nn.Module): - def __init__(self, steps: List[nn.Module]): - super().__init__() - self.steps = nn.ModuleList(steps) - - def forward(self, x): - cur_in = x - outs = [] - for step in self.steps: - cur_out = step(cur_in) - outs.append(cur_out) - cur_in = torch.cat((cur_in, cur_out), dim=1) - return torch.cat(outs[::-1], dim=1) - -def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features): - if kind == 'convtranspose': - return [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - norm_layer(min(max_features, int(ngf * mult / 2))), activation] - elif kind == 'bilinear': - return [nn.Upsample(scale_factor=2, mode='bilinear'), - DepthWiseSeperableConv(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=1, padding=1), - norm_layer(min(max_features, int(ngf * mult / 2))), activation] - else: - raise Exception(f"Invalid deconv kind: {kind}") \ No newline at end of file diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/base.py b/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/base.py deleted file mode 100644 index 540771902f3f91bf926f5fbf1e170242f8b26181..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/metrics/base.py +++ /dev/null @@ -1,46 +0,0 @@ -from torch import Tensor, nn -from os.path import join as pjoin -from .mr import MRMetrics -from .t2m import TM2TMetrics -from .mm import MMMetrics -from .m2t import M2TMetrics -from .m2m import PredMetrics - - -class BaseMetrics(nn.Module): - def __init__(self, cfg, datamodule, debug, **kwargs) -> None: - super().__init__() - - njoints = datamodule.njoints - - data_name = datamodule.name - if data_name in ["humanml3d", "kit"]: - self.TM2TMetrics = TM2TMetrics( - cfg=cfg, - dataname=data_name, - diversity_times=30 if debug else cfg.METRIC.DIVERSITY_TIMES, - dist_sync_on_step=cfg.METRIC.DIST_SYNC_ON_STEP, - ) - self.M2TMetrics = M2TMetrics( - cfg=cfg, - w_vectorizer=datamodule.hparams.w_vectorizer, - diversity_times=30 if debug else cfg.METRIC.DIVERSITY_TIMES, - dist_sync_on_step=cfg.METRIC.DIST_SYNC_ON_STEP) - self.MMMetrics = MMMetrics( - cfg=cfg, - mm_num_times=cfg.METRIC.MM_NUM_TIMES, - dist_sync_on_step=cfg.METRIC.DIST_SYNC_ON_STEP, - ) - - self.MRMetrics = MRMetrics( - njoints=njoints, - jointstype=cfg.DATASET.JOINT_TYPE, - dist_sync_on_step=cfg.METRIC.DIST_SYNC_ON_STEP, - ) - self.PredMetrics = PredMetrics( - cfg=cfg, - njoints=njoints, - jointstype=cfg.DATASET.JOINT_TYPE, - dist_sync_on_step=cfg.METRIC.DIST_SYNC_ON_STEP, - task=cfg.model.params.task, - ) 
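A minimal sketch (not part of any file in this diff) of the string-keyed factory pattern used in saicinpainting/training/modules/base.py above, where get_conv_block_ctor, get_norm_layer, get_activation and deconv_factory map config strings onto torch.nn building blocks. Only standard torch.nn layers are used here; the depthwise-separable and multidilated variants are omitted, and the helper names and defaults are illustrative assumptions.

```python
import torch
import torch.nn as nn


def get_norm_layer(kind='bn'):
    # map a config string to a normalization constructor
    if kind == 'bn':
        return nn.BatchNorm2d
    if kind == 'in':
        return nn.InstanceNorm2d
    raise ValueError(f'Unknown norm block kind {kind}')


def get_activation(kind='relu'):
    # map a config string to an activation module
    if kind == 'relu':
        return nn.ReLU(inplace=True)
    if kind == 'tanh':
        return nn.Tanh()
    raise ValueError(f'Unknown activation kind {kind}')


def upsample_block(in_ch, out_ch, norm_kind='bn', act_kind='relu'):
    # one 2x-upsampling step assembled from the factories, in the spirit of
    # deconv_factory(kind='convtranspose', ...) above
    norm_layer = get_norm_layer(norm_kind)
    activation = get_activation(act_kind)
    return nn.Sequential(
        nn.ConvTranspose2d(in_ch, out_ch, kernel_size=3, stride=2,
                           padding=1, output_padding=1),
        norm_layer(out_ch),
        activation,
    )


if __name__ == '__main__':
    block = upsample_block(64, 32)
    x = torch.randn(1, 64, 16, 16)
    print(block(x).shape)  # torch.Size([1, 32, 32, 32])
```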
diff --git a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/custom_trainer.py b/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/custom_trainer.py deleted file mode 100644 index 6575eef80157f93143fe3e15d00c137f9fa2c7c3..0000000000000000000000000000000000000000 --- a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/trainers/custom_trainer.py +++ /dev/null @@ -1,27 +0,0 @@ - -from transformers import Trainer -import torch - -def get_custom_trainer(weights: torch.Tensor): - - class CustomTrainer(Trainer): # got from https://huggingface.co/docs/transformers/main_classes/trainer - - def compute_loss(self, model, inputs, return_outputs=False): - - # recuperate labels - labels = inputs.get("labels") - - # forward pass - outputs = model(**inputs) - - # recuperate logits - logits = outputs.get("logits") - - # compute custom loss (passing the weights) - loss_fct = nn.CrossEntropyLoss(weight=weights) - - loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) - - return (loss, outputs) if return_outputs else loss - - return CustomTrainer diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/ddpm.py b/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/ddpm.py deleted file mode 100644 index f71a44af48c8cba8e97849b7e6813b3e6f9fe83c..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,1797 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager, nullcontext -from functools import partial -import itertools -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - 
cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - make_it_fit=False, - ucg_training=None, - reset_ema=False, - reset_num_ema_updates=False, - ): - super().__init__() - assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - self.make_it_fit = make_it_fit - if reset_ema: assert exists(ckpt_path) - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - if reset_ema: - assert self.use_ema - print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") - self.model_ema = LitEma(self.model) - if reset_num_ema_updates: - print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") - assert self.use_ema - self.model_ema.reset_num_updates() - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - else: - self.register_buffer('logvar', logvar) - - self.ucg_training = ucg_training or dict() - if self.ucg_training: - self.ucg_prng = np.random.RandomState() - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - elif self.parameterization == "v": - lvlb_weights = torch.ones_like(self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) - else: - raise NotImplementedError("mu not supported") - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - @torch.no_grad() - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - if self.make_it_fit: - n_params = len([name for name, _ in - itertools.chain(self.named_parameters(), - self.named_buffers())]) - for name, param in tqdm( - itertools.chain(self.named_parameters(), - self.named_buffers()), - desc="Fitting old weights to new weights", - total=n_params - ): - if not name in sd: - continue - old_shape = sd[name].shape - new_shape = param.shape - assert len(old_shape) == len(new_shape) - if len(new_shape) > 2: - # we only modify first two axes - assert new_shape[2:] == old_shape[2:] - # assumes first axis corresponds to output dim - if not new_shape == old_shape: - new_param = param.clone() - old_param = sd[name] - if len(new_shape) == 1: - for i in range(new_param.shape[0]): - new_param[i] = old_param[i % old_shape[0]] - elif len(new_shape) >= 2: - for i in range(new_param.shape[0]): - for j in range(new_param.shape[1]): - new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] - - n_used_old = torch.ones(old_shape[1]) - for j in range(new_param.shape[1]): - n_used_old[j % old_shape[1]] += 1 - n_used_new = torch.zeros(new_shape[1]) - for j in range(new_param.shape[1]): - n_used_new[j] = n_used_old[j % old_shape[1]] - - n_used_new = n_used_new[None, :] - while len(n_used_new.shape) < len(new_shape): - n_used_new = n_used_new.unsqueeze(-1) - new_param /= n_used_new - - sd[name] = new_param - - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys:\n {missing}") - if len(unexpected) > 0: - print(f"\nUnexpected Keys:\n {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
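        Closed form: q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I); the mean and variance below are read off the cumulative-alpha schedule buffers.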
- """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def predict_start_from_z_and_v(self, x_t, t, v): - # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v - ) - - def predict_eps_from_z_and_v(self, x_t, t, v): - return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) 
- - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_v(self, x, noise, t): - return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x - ) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - elif self.parameterization == "v": - target = self.get_v(x_start, noise, t) - else: - raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t 
= torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - for k in self.ucg_training: - p = self.ucg_training[k]["p"] - val = self.ucg_training[k]["val"] - if val is None: - val = "" - for i in range(len(batch[k])): - if self.ucg_prng.choice(2, p=[1 - p, p]): - batch[k][i] = val - - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - 
concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - force_null_conditioning=False, - *args, **kwargs): - self.force_null_conditioning = force_null_conditioning - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: - conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - reset_ema = kwargs.pop("reset_ema", False) - reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - if reset_ema: - assert self.use_ema - print( - f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") - self.model_ema = LitEma(self.model) - if reset_num_ema_updates: - print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") - assert self.use_ema - self.model_ema.reset_num_updates() - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None, return_x=False): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None and not self.force_null_conditioning: 
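            # Build the conditioning from the configured cond_stage_key (a text caption, class label,
            # or another image key in the batch), pre-encoding it here when the cond stage is frozen;
            # trainable cond stages are encoded later in forward().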
- if cond_key is None: - cond_key = self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox', "txt"]: - xc = batch[cond_key] - elif cond_key in ['class_label', 'cls']: - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_x: - out.extend([x]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def apply_model(self, x_noisy, t, cond, return_ids=False): - if isinstance(cond, dict): - # hybrid case, cond is expected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. 
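        Computed as KL(q(x_T | x_0) || N(0, I)) and converted from nats to bits by dividing by log(2).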
- """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - elif self.parameterization == "v": - target = self.get_v(x_start, noise, t) - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None, **kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, - shape, cond, verbose=False, **kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True, **kwargs) - - return samples, intermediates - - @torch.no_grad() - def get_unconditional_conditioning(self, batch_size, null_label=None): - if null_label is not None: - xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) - if isinstance(xc, dict) or isinstance(xc, list): - c = self.get_learned_conditioning(xc) - else: - if hasattr(xc, "to"): - xc = xc.to(self.device) - c = self.get_learned_conditioning(xc) - else: - if 
self.cond_stage_key in ["class_label", "cls"]: - xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) - return self.get_learned_conditioning(xc) - else: - raise NotImplementedError("todo") - if isinstance(c, list): # in case the encoder gives us a list - for i in range(len(c)): - c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) - else: - c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) - return c - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) - log["conditioning"] = xc - elif self.cond_stage_key in ['class_label', "cls"]: - try: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) - log['conditioning'] = xc - except KeyError: - # probably no "human_label" in batch - pass - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - quantize_denoised=True) - # samples, 
z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if unconditional_guidance_scale > 1.0: - uc = self.get_unconditional_conditioning(N, unconditional_guidance_label) - if self.model.conditioning_key == "crossattn-adm": - uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]} - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] - with ema_scope("Plotting Inpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - mask = 1. - mask - with ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False) - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - if not self.sequential_cross_attn: - cc = torch.cat(c_crossattn, 1) - else: - cc = c_crossattn - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'hybrid-adm': - assert c_adm is not None - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc, y=c_adm) - elif self.conditioning_key == 'crossattn-adm': - assert c_adm is not None - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc, y=c_adm) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class LatentUpscaleDiffusion(LatentDiffusion): - def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs): - super().__init__(*args, **kwargs) - # assumes that neither the cond_stage nor the low_scale_model contain trainable params - assert not self.cond_stage_trainable - self.instantiate_low_stage(low_scale_config) - self.low_scale_key = low_scale_key - self.noise_level_key = noise_level_key - - def instantiate_low_stage(self, config): - model = instantiate_from_config(config) - self.low_scale_model = model.eval() - self.low_scale_model.train = disabled_train - for param in self.low_scale_model.parameters(): - param.requires_grad = False - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): - if not log_mode: - z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) - else: - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - x_low = batch[self.low_scale_key][:bs] - x_low = rearrange(x_low, 'b h w c -> b c h w') - x_low = x_low.to(memory_format=torch.contiguous_format).float() - zx, noise_level = self.low_scale_model(x_low) - if self.noise_level_key is not None: - # get noise level from batch instead, e.g. 
when extracting a custom noise level for bsr - raise NotImplementedError('TODO') - - all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} - if log_mode: - # TODO: maybe disable if too expensive - x_low_rec = self.low_scale_model.decode(zx) - return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level - return z, all_conds - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, - unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, - log_mode=True) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - log["x_lr"] = x_low - log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) - log["conditioning"] = xc - elif self.cond_stage_key in ['class_label', 'cls']: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) - # TODO explore better "unconditional" choices for the other keys - # maybe guide away from empty text label and highest noise level and maximally degraded zx? - uc = dict() - for k in c: - if k == "c_crossattn": - assert isinstance(c[k], list) and len(c[k]) == 1 - uc[k] = [uc_tmp] - elif k == "c_adm": # todo: only run with text-based guidance? 
- assert isinstance(c[k], torch.Tensor) - #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level - uc[k] = c[k] - elif isinstance(c[k], list): - uc[k] = [c[k][i] for i in range(len(c[k]))] - else: - uc[k] = c[k] - - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - if plot_progressive_rows: - with ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - return log - - -class LatentFinetuneDiffusion(LatentDiffusion): - """ - Basis for different finetunas, such as inpainting or depth2image - To disable finetuning mode, set finetune_keys to None - """ - - def __init__(self, - concat_keys: tuple, - finetune_keys=("model.diffusion_model.input_blocks.0.0.weight", - "model_ema.diffusion_modelinput_blocks00weight" - ), - keep_finetune_dims=4, - # if model was trained without concat mode before and we would like to keep these channels - c_concat_log_start=None, # to log reconstruction of c_concat codes - c_concat_log_end=None, - *args, **kwargs - ): - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", list()) - super().__init__(*args, **kwargs) - self.finetune_keys = finetune_keys - self.concat_keys = concat_keys - self.keep_dims = keep_finetune_dims - self.c_concat_log_start = c_concat_log_start - self.c_concat_log_end = c_concat_log_end - if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint' - if exists(ckpt_path): - self.init_from_ckpt(ckpt_path, ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - - # make it explicit, finetune by including extra input channels - if exists(self.finetune_keys) and k in self.finetune_keys: - new_entry = None - for name, param in self.named_parameters(): - if name in self.finetune_keys: - print( - f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only") - new_entry = torch.zeros_like(param) # zero init - assert exists(new_entry), 'did not find matching parameter to modify' - new_entry[:, :self.keep_dims, ...] 
= sd[k] - sd[k] = new_entry - - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True) - c_cat, c = c["c_concat"][0], c["c_crossattn"][0] - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) - log["conditioning"] = xc - elif self.cond_stage_key in ['class_label', 'cls']: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if not (self.c_concat_log_start is None and self.c_concat_log_end is None): - log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end]) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label) - uc_cat = c_cat - uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": 
[c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc_full, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - return log - - -class LatentInpaintDiffusion(LatentFinetuneDiffusion): - """ - can either run as pure inpainting model (only concat mode) or with mixed conditionings, - e.g. mask as concat and text via cross-attn. - To disable finetuning mode, set finetune_keys to None - """ - - def __init__(self, - concat_keys=("mask", "masked_image"), - masked_image_key="masked_image", - *args, **kwargs - ): - super().__init__(concat_keys, *args, **kwargs) - self.masked_image_key = masked_image_key - assert self.masked_image_key in concat_keys - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): - # note: restricted to non-trainable encoders currently - assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting' - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - - assert exists(self.concat_keys) - c_cat = list() - for ck in self.concat_keys: - cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() - if bs is not None: - cc = cc[:bs] - cc = cc.to(self.device) - bchw = z.shape - if ck != self.masked_image_key: - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} - if return_first_stage_outputs: - return z, all_conds, x, xrec, xc - return z, all_conds - - @torch.no_grad() - def log_images(self, *args, **kwargs): - log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs) - log["masked_image"] = rearrange(args[0]["masked_image"], - 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() - return log - - -class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion): - """ - condition on monocular depth estimation - """ - - def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs): - super().__init__(concat_keys=concat_keys, *args, **kwargs) - self.depth_model = instantiate_from_config(depth_stage_config) - self.depth_stage_key = concat_keys[0] - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): - # note: restricted to non-trainable encoders currently - assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img' - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - - assert exists(self.concat_keys) - assert len(self.concat_keys) == 1 - c_cat = list() - for ck in self.concat_keys: - cc = batch[ck] - if bs is not None: - cc = cc[:bs] - cc = cc.to(self.device) - cc = self.depth_model(cc) - cc = torch.nn.functional.interpolate( - cc, - size=z.shape[2:], - mode="bicubic", - align_corners=False, - ) - - depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3], - keepdim=True) - cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1. 
- c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} - if return_first_stage_outputs: - return z, all_conds, x, xrec, xc - return z, all_conds - - @torch.no_grad() - def log_images(self, *args, **kwargs): - log = super().log_images(*args, **kwargs) - depth = self.depth_model(args[0][self.depth_stage_key]) - depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \ - torch.amax(depth, dim=[1, 2, 3], keepdim=True) - log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1. - return log - - -class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion): - """ - condition on low-res image (and optionally on some spatial noise augmentation) - """ - def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None, - low_scale_config=None, low_scale_key=None, *args, **kwargs): - super().__init__(concat_keys=concat_keys, *args, **kwargs) - self.reshuffle_patch_size = reshuffle_patch_size - self.low_scale_model = None - if low_scale_config is not None: - print("Initializing a low-scale model") - assert exists(low_scale_key) - self.instantiate_low_stage(low_scale_config) - self.low_scale_key = low_scale_key - - def instantiate_low_stage(self, config): - model = instantiate_from_config(config) - self.low_scale_model = model.eval() - self.low_scale_model.train = disabled_train - for param in self.low_scale_model.parameters(): - param.requires_grad = False - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): - # note: restricted to non-trainable encoders currently - assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft' - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - - assert exists(self.concat_keys) - assert len(self.concat_keys) == 1 - # optionally make spatial noise_level here - c_cat = list() - noise_level = None - for ck in self.concat_keys: - cc = batch[ck] - cc = rearrange(cc, 'b h w c -> b c h w') - if exists(self.reshuffle_patch_size): - assert isinstance(self.reshuffle_patch_size, int) - cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', - p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size) - if bs is not None: - cc = cc[:bs] - cc = cc.to(self.device) - if exists(self.low_scale_model) and ck == self.low_scale_key: - cc, noise_level = self.low_scale_model(cc) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - if exists(noise_level): - all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level} - else: - all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} - if return_first_stage_outputs: - return z, all_conds, x, xrec, xc - return z, all_conds - - @torch.no_grad() - def log_images(self, *args, **kwargs): - log = super().log_images(*args, **kwargs) - log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w') - return log diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/util.py b/spaces/PKUWilliamYang/VToonify/vtoonify/util.py deleted file mode 100644 index 01ad2930c55d07866dee02e019d359bb78f65fc7..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/util.py +++ /dev/null @@ -1,229 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from PIL import Image -import cv2 -import random -import math -import argparse -import torch -from torch.utils import data -from torch.nn import functional as F -from torch import autograd 
-from torch.nn import init -import torchvision.transforms as transforms -from model.stylegan.op import conv2d_gradfix -from model.encoder.encoders.psp_encoders import GradualStyleEncoder -from model.encoder.align_all_parallel import get_landmark - -def visualize(img_arr, dpi): - plt.figure(figsize=(10,10),dpi=dpi) - plt.imshow(((img_arr.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)) - plt.axis('off') - plt.show() - -def save_image(img, filename): - tmp = ((img.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) - cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)) - -def load_image(filename): - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - - img = Image.open(filename) - img = transform(img) - return img.unsqueeze(dim=0) - -def data_sampler(dataset, shuffle, distributed): - if distributed: - return data.distributed.DistributedSampler(dataset, shuffle=shuffle) - - if shuffle: - return data.RandomSampler(dataset) - - else: - return data.SequentialSampler(dataset) - - -def requires_grad(model, flag=True): - for p in model.parameters(): - p.requires_grad = flag - - -def accumulate(model1, model2, decay=0.999): - par1 = dict(model1.named_parameters()) - par2 = dict(model2.named_parameters()) - - for k in par1.keys(): - par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay) - - -def sample_data(loader): - while True: - for batch in loader: - yield batch - - -def d_logistic_loss(real_pred, fake_pred): - real_loss = F.softplus(-real_pred) - fake_loss = F.softplus(fake_pred) - - return real_loss.mean() + fake_loss.mean() - - -def d_r1_loss(real_pred, real_img): - with conv2d_gradfix.no_weight_gradients(): - grad_real, = autograd.grad( - outputs=real_pred.sum(), inputs=real_img, create_graph=True - ) - grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean() - - return grad_penalty - - -def g_nonsaturating_loss(fake_pred): - loss = F.softplus(-fake_pred).mean() - - return loss - - -def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): - noise = torch.randn_like(fake_img) / math.sqrt( - fake_img.shape[2] * fake_img.shape[3] - ) - grad, = autograd.grad( - outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True - ) - path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) - - path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) - - path_penalty = (path_lengths - path_mean).pow(2).mean() - - return path_penalty, path_mean.detach(), path_lengths - - -def make_noise(batch, latent_dim, n_noise, device): - if n_noise == 1: - return torch.randn(batch, latent_dim, device=device) - - noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0) - - return noises - - -def mixing_noise(batch, latent_dim, prob, device): - if prob > 0 and random.random() < prob: - return make_noise(batch, latent_dim, 2, device) - - else: - return [make_noise(batch, latent_dim, 1, device)] - - -def set_grad_none(model, targets): - for n, p in model.named_parameters(): - if n in targets: - p.grad = None - - -def weights_init(m): - classname = m.__class__.__name__ - if classname.find('BatchNorm2d') != -1: - if hasattr(m, 'weight') and m.weight is not None: - init.normal_(m.weight.data, 1.0, 0.02) - if hasattr(m, 'bias') and m.bias is not None: - init.constant_(m.bias.data, 0.0) - elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - 
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - if hasattr(m, 'bias') and m.bias is not None: - init.constant_(m.bias.data, 0.0) - - -def load_psp_standalone(checkpoint_path, device='cuda'): - ckpt = torch.load(checkpoint_path, map_location='cpu') - opts = ckpt['opts'] - if 'output_size' not in opts: - opts['output_size'] = 1024 - opts['n_styles'] = int(math.log(opts['output_size'], 2)) * 2 - 2 - opts = argparse.Namespace(**opts) - psp = GradualStyleEncoder(50, 'ir_se', opts) - psp_dict = {k.replace('encoder.', ''): v for k, v in ckpt['state_dict'].items() if k.startswith('encoder.')} - psp.load_state_dict(psp_dict) - psp.eval() - psp = psp.to(device) - latent_avg = ckpt['latent_avg'].to(device) - - def add_latent_avg(model, inputs, outputs): - return outputs + latent_avg.repeat(outputs.shape[0], 1, 1) - - psp.register_forward_hook(add_latent_avg) - return psp - -def get_video_crop_parameter(filepath, predictor, padding=[200,200,200,200]): - if type(filepath) == str: - img = dlib.load_rgb_image(filepath) - else: - img = filepath - lm = get_landmark(img, predictor) - if lm is None: - return None - lm_chin = lm[0 : 17] # left-right - lm_eyebrow_left = lm[17 : 22] # left-right - lm_eyebrow_right = lm[22 : 27] # left-right - lm_nose = lm[27 : 31] # top-down - lm_nostrils = lm[31 : 36] # top-down - lm_eye_left = lm[36 : 42] # left-clockwise - lm_eye_right = lm[42 : 48] # left-clockwise - lm_mouth_outer = lm[48 : 60] # left-clockwise - lm_mouth_inner = lm[60 : 68] # left-clockwise - - scale = 64. / (np.mean(lm_eye_right[:,0])-np.mean(lm_eye_left[:,0])) - center = ((np.mean(lm_eye_right, axis=0)+np.mean(lm_eye_left, axis=0)) / 2) * scale - h, w = round(img.shape[0] * scale), round(img.shape[1] * scale) - left = max(round(center[0] - padding[0]), 0) // 8 * 8 - right = min(round(center[0] + padding[1]), w) // 8 * 8 - top = max(round(center[1] - padding[2]), 0) // 8 * 8 - bottom = min(round(center[1] + padding[3]), h) // 8 * 8 - return h,w,top,bottom,left,right,scale - -def tensor2cv2(img): - tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) - return cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR) - -# get parameters from the stylegan and mark them with their layers -def gather_params(G): - params = dict( - [(res, {}) for res in range(18)] + [("others", {})] - ) - for n, p in sorted(list(G.named_buffers()) + list(G.named_parameters())): - if n.startswith("convs"): - layer = int(n.split(".")[1]) + 1 - params[layer][n] = p - elif n.startswith("to_rgbs"): - layer = int(n.split(".")[1]) * 2 + 3 - params[layer][n] = p - elif n.startswith("conv1"): - params[0][n] = p - elif n.startswith("to_rgb1"): - params[1][n] = p - else: - params["others"][n] = p - return params - -# blend the ffhq stylegan model and the finetuned model for toonify -# see ``Resolution Dependent GAN Interpolation for Controllable Image Synthesis Between Domains'' -def blend_models(G_low, G_high, weight=[1]*7+[0]*11): - params_low = gather_params(G_low) - params_high = gather_params(G_high) - - for res in range(18): - for n, p in params_high[res].items(): - params_high[res][n] = params_high[res][n] * (1-weight[res]) + params_low[res][n] * weight[res] - - state_dict = {} - for _, p in params_high.items(): - state_dict.update(p) - - return state_dict - diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/fdes-finalizers.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/fdes-finalizers.go deleted file mode 100644 index 
9f558a42274f565e41f7156238c731386c23b4c9..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/fdes-finalizers.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-28.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-28.go deleted file mode 100644 index f5a7db5bfaacc10c20584f22dc3524c2ea3c53b4..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-28.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/OpenAssistant-reward-model-deberta-v3-large-v2/README.md b/spaces/PeepDaSlan9/OpenAssistant-reward-model-deberta-v3-large-v2/README.md deleted file mode 100644 index d9f0dec6044afde3bb32ba6e33b9df5583063a14..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/OpenAssistant-reward-model-deberta-v3-large-v2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: OpenAssistant Reward Model Deberta V3 Large V2 -emoji: 👁 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/flickr.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/flickr.py deleted file mode 100644 index 5dfff031ea80e012d9b6a5b547b5e12cb50a0896..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/flickr.py +++ /dev/null @@ -1,8 +0,0 @@ -import torch -import torchvision -import torch.utils.data as data -from maskrcnn_benchmark.data.datasets.modulated_coco import ModulatedDataset - - -class FlickrDataset(ModulatedDataset): - pass diff --git a/spaces/PixArt-alpha/PixArt-alpha/README.md b/spaces/PixArt-alpha/PixArt-alpha/README.md deleted file mode 100644 index 5a00d07f34c605d555af8dcc23af5a027a075531..0000000000000000000000000000000000000000 --- a/spaces/PixArt-alpha/PixArt-alpha/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pixart-α -emoji: 👀 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false ---- - -Inference Code: https://github.com/PixArt-alpha/PixArt-alpha -Paper: https://arxiv.org/abs/2310.00426 diff --git a/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/transformer.py b/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/transformer.py deleted file mode 100644 index 98902ac8f08868c486a7c74781e952bee444c2e6..0000000000000000000000000000000000000000 --- a/spaces/Rajagopal/ImageBind_zeroshot_demo2/models/transformer.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python3 -# Portions Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -# Code modified from -# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; -# https://github.com/facebookresearch/deit/blob/main/models.py -# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py - - -import copy -import fnmatch -import logging -from functools import partial -from typing import Callable, List - -import torch -import torch.nn as nn -import torch.utils.checkpoint as checkpoint - -from timm.models.layers import DropPath, trunc_normal_ - - -class Attention(nn.Module): - def __init__( - self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, - # can set manually to be compat with prev weights - self.scale = qk_scale or head_dim**-0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = ( - qkv[0], - qkv[1], - qkv[2], - ) # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Mlp(nn.Module): - def __init__( - self, - in_features, - hidden_features=None, - out_features=None, - act_layer=nn.GELU, - drop=0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class MultiheadAttention(nn.MultiheadAttention): - def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): - return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] - - -class ViTAttention(Attention): - def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): - assert attn_mask is None - return super().forward(x) - - -class BlockWithMasking(nn.Module): - def __init__( - self, - dim: int, - attn_target: Callable, - mlp_ratio: int = 4, - act_layer: Callable = nn.GELU, - norm_layer: Callable = nn.LayerNorm, - ffn_dropout_rate: float = 0.0, - drop_path: float = 0.0, - layer_scale_type: str = None, - layer_scale_init_value: float = 1e-4, - ): - super().__init__() - - assert not isinstance( - attn_target, nn.Module - ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" 
- self.attn = attn_target() - if drop_path > 0.0: - self.drop_path = DropPath(drop_path) - else: - self.drop_path = nn.Identity() - self.norm_1 = norm_layer(dim) - mlp_hidden_dim = int(mlp_ratio * dim) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=ffn_dropout_rate, - ) - self.norm_2 = norm_layer(dim) - self.layer_scale_type = layer_scale_type - if self.layer_scale_type is not None: - assert self.layer_scale_type in [ - "per_channel", - "scalar", - ], f"Found Layer scale type {self.layer_scale_type}" - if self.layer_scale_type == "per_channel": - # one gamma value per channel - gamma_shape = [1, 1, dim] - elif self.layer_scale_type == "scalar": - # single gamma value for all channels - gamma_shape = [1, 1, 1] - # two gammas: for each part of the fwd in the encoder - self.layer_scale_gamma1 = nn.Parameter( - torch.ones(size=gamma_shape) * layer_scale_init_value, - requires_grad=True, - ) - self.layer_scale_gamma2 = nn.Parameter( - torch.ones(size=gamma_shape) * layer_scale_init_value, - requires_grad=True, - ) - - def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): - if self.layer_scale_type is None: - x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) - x = x + self.drop_path(self.mlp(self.norm_2(x))) - else: - x = ( - x - + self.drop_path(self.attn(self.norm_1(x), attn_mask)) - * self.layer_scale_gamma1 - ) - x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2 - return x - - -_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) - - -class SimpleTransformer(nn.Module): - def __init__( - self, - attn_target: Callable, - embed_dim: int, - num_blocks: int, - block: Callable = BlockWithMasking, - pre_transformer_layer: Callable = None, - post_transformer_layer: Callable = None, - drop_path_rate: float = 0.0, - drop_path_type: str = "progressive", - norm_layer: Callable = _LAYER_NORM, - mlp_ratio: int = 4, - ffn_dropout_rate: float = 0.0, - layer_scale_type: str = None, # from cait; possible values are None, "per_channel", "scalar" - layer_scale_init_value: float = 1e-4, # from cait; float - weight_init_style: str = "jax", # possible values jax or pytorch - ): - """ - Simple Transformer with the following features - 1. Supports masked attention - 2. Supports DropPath - 3. Supports LayerScale - 4. Supports Dropout in Attention and FFN - 5. 
Makes few assumptions about the input except that it is a Tensor - """ - super().__init__() - self.pre_transformer_layer = pre_transformer_layer - if drop_path_type == "progressive": - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] - elif drop_path_type == "uniform": - dpr = [drop_path_rate for i in range(num_blocks)] - else: - raise ValueError(f"Unknown drop_path_type: {drop_path_type}") - - self.blocks = nn.Sequential( - *[ - block( - dim=embed_dim, - attn_target=attn_target, - mlp_ratio=mlp_ratio, - ffn_dropout_rate=ffn_dropout_rate, - drop_path=dpr[i], - norm_layer=norm_layer, - layer_scale_type=layer_scale_type, - layer_scale_init_value=layer_scale_init_value, - ) - for i in range(num_blocks) - ] - ) - self.post_transformer_layer = post_transformer_layer - self.weight_init_style = weight_init_style - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - if self.weight_init_style == "jax": - # Based on MAE and official Jax ViT implementation - torch.nn.init.xavier_uniform_(m.weight) - elif self.weight_init_style == "pytorch": - # PyTorch ViT uses trunc_normal_ - trunc_normal_(m.weight, std=0.02) - - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, (nn.LayerNorm)): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def forward( - self, - tokens: torch.Tensor, - attn_mask: torch.Tensor = None, - use_checkpoint: bool = False, - checkpoint_every_n: int = 1, - checkpoint_blk_ids: List[int] = None, - ): - """ - Inputs - - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) - - attn: mask of shape L x L - - Output - - x: data of shape N x L x D (or L x N x D depending on the attention implementation) - """ - if self.pre_transformer_layer: - tokens = self.pre_transformer_layer(tokens) - if use_checkpoint and checkpoint_blk_ids is None: - checkpoint_blk_ids = [ - blk_id - for blk_id in range(len(self.blocks)) - if blk_id % checkpoint_every_n == 0 - ] - if checkpoint_blk_ids: - checkpoint_blk_ids = set(checkpoint_blk_ids) - for blk_id, blk in enumerate(self.blocks): - if use_checkpoint and blk_id in checkpoint_blk_ids: - tokens = checkpoint.checkpoint( - blk, tokens, attn_mask, use_reentrant=False - ) - else: - tokens = blk(tokens, attn_mask=attn_mask) - if self.post_transformer_layer: - tokens = self.post_transformer_layer(tokens) - return tokens diff --git a/spaces/RajkNakka/speech-to-speech-translation/app.py b/spaces/RajkNakka/speech-to-speech-translation/app.py deleted file mode 100644 index 19a8cd51b6df63491b92588b17f202f1faafd3de..0000000000000000000000000000000000000000 --- a/spaces/RajkNakka/speech-to-speech-translation/app.py +++ /dev/null @@ -1,73 +0,0 @@ -import gradio as gr -import numpy as np -import torch -from datasets import load_dataset - -from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline - - -device = "cuda:0" if torch.cuda.is_available() else "cpu" - -# load speech translation checkpoint -asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device) - -# load text-to-speech checkpoint and speaker embeddings -# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") -processor = SpeechT5Processor.from_pretrained("RajkNakka/speecht5_finetuned_voxpopuli_nl") - -model = SpeechT5ForTextToSpeech.from_pretrained("RajkNakka/speecht5_finetuned_voxpopuli_nl").to(device) -vocoder = 
SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device) - -embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") -speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0) - - -def translate(audio): - outputs = asr_pipe(audio, max_new_tokens=128, generate_kwargs={"task": "transcribe", "language": "nl"}) - return outputs["text"] - - -def synthesise(text): - inputs = processor(text=text, return_tensors="pt") - speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder) - return speech.cpu() - - -def speech_to_speech_translation(audio): - translated_text = translate(audio) - synthesised_speech = synthesise(translated_text) - synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16) - return 16000, synthesised_speech - - -title = "Cascaded STST" -description = """ -Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's -[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech: - -![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation") -""" - -demo = gr.Blocks() - -mic_translate = gr.Interface( - fn=speech_to_speech_translation, - inputs=gr.Audio(source="microphone", type="filepath"), - outputs=gr.Audio(label="Generated Speech", type="numpy"), - title=title, - description=description, -) - -file_translate = gr.Interface( - fn=speech_to_speech_translation, - inputs=gr.Audio(source="upload", type="filepath"), - outputs=gr.Audio(label="Generated Speech", type="numpy"), - examples=[["./example.wav"]], - title=title, - description=description, -) - -with demo: - gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"]) - -demo.launch() diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py deleted file mode 100644 index 82293dffc492ea50b16335fd411b255dd5dfca57..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py +++ /dev/null @@ -1,232 +0,0 @@ -import re -from abc import ABC, abstractmethod -from typing import List, Union - -from .text import Span, Text - - -def _combine_regex(*regexes: str) -> str: - """Combine a number of regexes in to a single regex. - - Returns: - str: New regex with all regexes ORed together. - """ - return "|".join(regexes) - - -class Highlighter(ABC): - """Abstract base class for highlighters.""" - - def __call__(self, text: Union[str, Text]) -> Text: - """Highlight a str or Text instance. - - Args: - text (Union[str, ~Text]): Text to highlight. - - Raises: - TypeError: If not called with text or str. - - Returns: - Text: A test instance with highlighting applied. - """ - if isinstance(text, str): - highlight_text = Text(text) - elif isinstance(text, Text): - highlight_text = text.copy() - else: - raise TypeError(f"str or Text instance required, not {text!r}") - self.highlight(highlight_text) - return highlight_text - - @abstractmethod - def highlight(self, text: Text) -> None: - """Apply highlighting in place to text. 
- - Args: - text (~Text): A text object highlight. - """ - - -class NullHighlighter(Highlighter): - """A highlighter object that doesn't highlight. - - May be used to disable highlighting entirely. - - """ - - def highlight(self, text: Text) -> None: - """Nothing to do""" - - -class RegexHighlighter(Highlighter): - """Applies highlighting from a list of regular expressions.""" - - highlights: List[str] = [] - base_style: str = "" - - def highlight(self, text: Text) -> None: - """Highlight :class:`rich.text.Text` using regular expressions. - - Args: - text (~Text): Text to highlighted. - - """ - - highlight_regex = text.highlight_regex - for re_highlight in self.highlights: - highlight_regex(re_highlight, style_prefix=self.base_style) - - -class ReprHighlighter(RegexHighlighter): - """Highlights the text typically produced from ``__repr__`` methods.""" - - base_style = "repr." - highlights = [ - r"(?P<)(?P[-\w.:|]*)(?P[\w\W]*?)(?P>)", - r'(?P[\w_]{1,50})=(?P"?[\w_]+"?)?', - r"(?P[][{}()])", - _combine_regex( - r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", - r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})", - r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})", - r"(?P[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})", - r"(?P[\w.]*?)\(", - r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b", - r"(?P\.\.\.)", - r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?", - r"(?b?'''.*?(?(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)", - ), - ] - - -class JSONHighlighter(RegexHighlighter): - """Highlights JSON""" - - # Captures the start and end of JSON strings, handling escaped quotes - JSON_STR = r"(?b?\".*?(?[\{\[\(\)\]\}])", - r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b", - r"(?P(? None: - super().highlight(text) - - # Additional work to handle highlighting JSON keys - plain = text.plain - append = text.spans.append - whitespace = self.JSON_WHITESPACE - for match in re.finditer(self.JSON_STR, plain): - start, end = match.span() - cursor = end - while cursor < len(plain): - char = plain[cursor] - cursor += 1 - if char == ":": - append(Span(start, end, "json.key")) - elif char in whitespace: - continue - break - - -class ISO8601Highlighter(RegexHighlighter): - """Highlights the ISO8601 date time strings. - Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html - """ - - base_style = "iso8601." - highlights = [ - # - # Dates - # - # Calendar month (e.g. 2008-08). The hyphen is required - r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$", - # Calendar date w/o hyphens (e.g. 20080830) - r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$", - # Ordinal date (e.g. 2008-243). The hyphen is optional - r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$", - # - # Weeks - # - # Week of the year (e.g., 2008-W35). The hyphen is optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$", - # Week date (e.g., 2008-W35-6). The hyphens are optional - r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$", - # - # Times - # - # Hours and minutes (e.g., 17:21). The colon is optional - r"^(?P
  2. -
  3. Have fun and enjoy the game. The most important thing while playing Hunter Assassin with hack mod apk is simply to enjoy yourself. You can try different strategies and tactics, challenge yourself with harder levels and missions, or just relax and take out a few targets. You can also share your achievements and experiences with your friends or other players online.
  4. - -

    The best characters and weapons to use in Hunter Assassin with hack mod apk

    -

    With Hunter Assassin hack mod apk, you can unlock and use every character and weapon in the game. However, some characters and weapons perform better than others and suit different playstyles. Here are some of the best characters and weapons to use in Hunter Assassin with hack mod apk, summarized in the table below:

    | Character | Weapon | Description |
    | --- | --- | --- |
    | Ninja | Katana | The Ninja is one of the fastest and stealthiest characters in the game. He can move quickly and quietly, making him ideal for sneaking up on enemies and killing them in one swift strike. His weapon, the Katana, is a long and sharp sword that can slice through any target with ease. The Ninja is perfect for levels and missions that require speed and stealth. |
    | Sniper | Rifle | The Sniper is one of the most accurate and powerful characters in the game. He can shoot targets from a long distance, making him ideal for taking down enemies without getting close to them. His weapon, the Rifle, is a high-caliber gun that can pierce through any armor or obstacle with precision. The Sniper is perfect for levels and missions that require accuracy and power. |
    | Tank | Axe | The Tank is one of the most durable and strong characters in the game. He can withstand a lot of damage, making him ideal for surviving attacks from enemies. His weapon, the Axe, is a heavy and blunt weapon that can smash any target with force. The Tank is perfect for levels and missions that require durability and strength. |
    | Assassin | Dagger | The Assassin is one of the most versatile and balanced characters in the game. He can do a bit of everything, making him ideal for adapting to different situations. His weapon, the Dagger, is a short and sharp weapon that can stab any target with speed. The Assassin is perfect for levels and missions that require versatility and balance. |
    -

    Conclusion

    -

    A summary of the main points and a call to action

    -

    In conclusion, Hunter Assassin hack download is a modified version of the original game that gives you unlimited money and gems, along with other features and benefits. However, it also carries disadvantages and risks that you should be aware of before using it. If you decide to use it, follow the steps and requirements for downloading and installing the mod apk, and use it wisely and sparingly. Apply the tips and tricks described above, and choose the characters and weapons that best fit your gameplay. The hack can make the game more fun and exciting, but it can also ruin your game and your device if you are not careful, so use it at your own risk and discretion. If you want to download Hunter Assassin hack mod apk, you can use the link below. But remember, it is for entertainment purposes only, not for cheating or harming anyone. Have fun and enjoy being a hunter assassin!

    FAQs

    -

    Here are some of the frequently asked questions about Hunter Assassin hack download:

    -
      -
    • Q: Is Hunter Assassin hack download safe to use?
      -A: Hunter Assassin hack download is not safe to use, as it might contain malware, viruses, spyware, or other malicious software that can damage your device or steal your data. You are also risking your account being banned or suspended by the game servers or authorities for using a hacked version of the game.
    • -
    • Q: Is Hunter Assassin hack download legal to use?
      -A: Hunter Assassin hack download is not legal to use, as it violates the game's terms of service, the intellectual property rights of the game developers, and potentially the law. You are depriving the game developers of their rightful income by not paying for their products and services. You are also giving yourself an unfair advantage over other players by using a hacked version of the game.
    • -
    • Q: Is Hunter Assassin hack download free to use?
      -A: Hunter Assassin hack download is free to use, as you don't have to pay any money to download or install it on your device. However, you might have to pay a price in terms of your device's security, your account's safety, and your gaming experience's quality by using a hacked version of the game.
    • -
    • Q: How can I update Hunter Assassin hack mod apk?
      -A: You can update Hunter Assassin hack mod apk by downloading and installing the latest version of the hacked file from the same source that you got it from. However, you might lose your progress or data if you update the hacked version of the game. You might also encounter compatibility issues or errors if you update the hacked version of the game.
    • -
    • Q: How can I uninstall Hunter Assassin hack mod apk?
      -A: You can uninstall Hunter Assassin hack mod apk by deleting the hacked file from your device's storage. You can also uninstall the original version of Hunter Assassin from your device if you want to stop playing the game altogether.
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md b/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md deleted file mode 100644 index 32cd3381bd775cf35279952226e7c29e44cae359..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Live Your Life MP3 - The Ultimate Motivational Song by T.I. and Rihanna.md +++ /dev/null @@ -1,156 +0,0 @@ - -

    Download Live Your Life MP3: How to Enjoy the Hit Song by T.I. and Rihanna

    -

    Do you love the song Live Your Life by T.I. and Rihanna? Do you want to download it as an MP3 file and listen to it offline? If yes, then you are in the right place. In this article, we will show you how to download Live Your Life MP3 from three different sources: Spotify, Last.fm, and AfroCharts. We will also compare the pros and cons of each option and give you some tips on how to enjoy the song better.

    -

    Introduction

    -

    What is Live Your Life?

    -

    Live Your Life is a song by American rapper T.I., featuring Barbadian singer Rihanna. It was released as the third single from T.I.'s sixth studio album Paper Trail in 2008. The song samples O-Zone's Dragostea Din Tei, also known as the Numa Numa song, and has a motivational theme of living your life to the fullest. The song was a huge commercial success, reaching number one on the Billboard Hot 100 chart and becoming one of the best-selling singles of all time.

    -

    download live your life mp3


    Download Zip https://urllie.com/2uNHZr



    -

    Why is Live Your Life popular?

    -

    Live Your Life is popular for many reasons. First of all, it has a catchy melody and a powerful chorus that make you want to sing along. Second, it has a positive message that inspires you to pursue your dreams and not let others bring you down. Third, it features two of the most popular artists in the music industry, T.I. and Rihanna, who deliver an amazing performance with their rap and vocals. Fourth, it has a memorable music video that shows T.I. and Rihanna traveling around the world and having fun.

    -

    How to download Live Your Life MP3

    -

    Now that you know what Live Your Life is and why it is popular, you might be wondering how to download it as an MP3 file. There are many ways to do that, but we will focus on three of them: Spotify, Last.fm, and AfroCharts. Let's take a look at each option and see how they work.

    -

    Option 1: Spotify

    -

    Pros and cons of Spotify

    -

    Spotify is one of the most popular music streaming services in the world, with over 350 million users. It offers access to millions of songs, podcasts, playlists, and radio stations. You can listen to music online or offline, with or without ads, depending on your subscription plan.

    -

    The pros of using Spotify to download Live Your Life MP3 are:

    -
      -
    • You can enjoy high-quality audio (up to 320 kbps).
    • -
    • You can create your own playlists and share them with your friends.
    • -
    • You can discover new music based on your preferences and recommendations.
    • -
    • You can sync your music across different devices (phone, computer, tablet, etc.).
    • -
    -

    The cons of using Spotify to download Live Your Life MP3 are:

    -
      -
    • You need a premium account ($9.99 per month) to download music offline.
    • -
    • You can only download up to 10,000 songs per device.
    • -
    • You can only play the downloaded songs within the Spotify app.
    • -
    • You need an internet connection to update your library and verify your subscription every 30 days.
    • -
    -

    How to download Live Your Life MP3 from Spotify

    -

    To download Live Your Life MP3 from Spotify, you need to follow these steps:

    -
      -
    1. Open the Spotify app on your device and log in with your premium account.
    2. -
    3. Search for Live Your Life by T.I. and Rihanna and tap on it.
    4. -
    5. Tap on the three dots icon on the top right corner and select Download.
    6. -
    7. Wait for the song to download and check the green arrow icon next to it.
    8. -
    9. Enjoy listening to Live Your Life offline within the Spotify app.
    10. -
    -

    Option 2: Last.fm

    -

    Pros and cons of Last.fm

    -

    Last.fm is another popular music streaming service that also provides music discovery and recommendation features. It tracks the music you listen to and creates personalized charts, stats, and suggestions. You can also join a community of music lovers and share your tastes and opinions.

    -

    download live your life mp3 free
    -download live your life mp3 song
    -download live your life mp3 by ti
    -download live your life mp3 rihanna
    -download live your life mp3 320kbps
    -download live your life mp3 skull
    -download live your life mp3 online
    -download live your life mp3 audio
    -download live your life mp3 music
    -download live your life mp3 video
    -download live your life mp3 remix
    -download live your life mp3 instrumental
    -download live your life mp3 ringtone
    -download live your life mp3 lyrics
    -download live your life mp3 album
    -download live your life mp3 paper trail
    -download live your life mp3 youtube
    -download live your life mp3 soundcloud
    -download live your life mp3 spotify
    -download live your life mp3 apple music
    -download live your life mp3 amazon music
    -download live your life mp3 google play music
    -download live your life mp3 deezer
    -download live your life mp3 tidal
    -download live your life mp3 napster
    -download live your life mp3 pandora
    -download live your life mp3 iheartradio
    -download live your life mp3 tunein radio
    -download live your life mp3 shazam
    -download live your life mp3 musixmatch
    -download live your life mp3 genius
    -download live your life mp3 azlyrics
    -download live your life mp3 metrolyrics
    -download live your life mp3 lyricsfreak
    -download live your life mp3 songmeanings
    -download live your life mp3 songfacts
    -download live your life mp3 billboard
    -download live your life mp3 rolling stone
    -download live your life mp3 pitchfork
    -download live your life mp3 allmusic
    -download live your life mp3 discogs
    -download live your life mp3 last.fm[^1^]
    -download live your life mp3 rateyourmusic
    -download live your life mp3 whosampled
    -download live your life mp3 genius samples

    -

    The pros of using Last.fm to download Live Your Life MP3 are:

    -
      -
    • You can download music for free from various sources (YouTube, SoundCloud, etc.).
    • -
    • You can scrobble (record) your listening history and get insights into your musical preferences.
    • -
    • You can explore new music based on your scrobbles and similar artists.
    • -
    • You can interact with other users and join groups, forums, and events.
    • -
    -

    The cons of using Last.fm to download Live Your Life MP3 are:

    -
      -
    • You need to install third-party software (the Last.fm Scrobbler) to download music from Last.fm.
    • -
    • You may not find all the songs you want to download on Last.fm.
    • -
    • You may encounter some quality issues or broken links when downloading music from Last.fm.
    • -
    • You may violate some copyright laws or terms of service when downloading music from Last.fm.
    • -
    -

    How to download Live Your Life MP3 from Last.fm

    -

    To download Live Your Life MP3 from Last.fm, you need to follow these steps (a quick format check for the saved file is sketched after the list):

    -
      -
    1. Download and install the Last.fm Scrobbler software on your device from here.
    2. -
    3. Open the Last.fm app or website and log in with your account.
    4. -
    5. Search for Live Your Life by T.I. and Rihanna and click on it.
    6. -
    7. Scroll down to the section that says "Download Track" and choose a source (YouTube, SoundCloud, etc.).
    8. -
    9. Click on the download button and save the MP3 file on your device.
    10. -
    11. Enjoy listening to Live Your Life offline with any music player.
    12. -
    -
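    As mentioned above, a quick way to check the saved file is to look at its first few bytes. Downloads from third-party sources occasionally turn out to be an error page rather than audio, so a rough sanity check can save some confusion. The sketch below uses only the Python standard library; the file name is a placeholder for whatever you saved in step 9.

# Rough check that the saved download is actually an MP3 file.
# "live_your_life.mp3" is a placeholder for the file you saved in step 9.
path = "live_your_life.mp3"

with open(path, "rb") as f:
    header = f.read(3)

# Files with ID3v2 tags start with b"ID3"; untagged MPEG audio starts with a
# 0xFF frame-sync byte. Anything else is probably not an MP3 (e.g. an HTML error page).
looks_like_mp3 = header.startswith(b"ID3") or header[:1] == b"\xff"
print("Looks like an MP3:", looks_like_mp3)

    This is only a heuristic: it confirms the file format, not the audio quality or the legality of the source.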

    Option 3: AfroCharts

    -

    Pros and cons of AfroCharts

    -

    AfroCharts is a music streaming service that focuses on African music. It offers access to thousands of songs, albums, artists, and genres from across the continent. You can listen to music online or offline, with or without ads, depending on your subscription plan.

    -

    The pros of using AfroCharts to download Live Your Life MP3 are:

    -
      -
    • You can support African musicians and promote their culture and diversity.
    • -
    • You can discover new music from different regions, languages, and styles.
    • -
    • You can create your own playlists and follow your favorite artists.
    • -
    • You can enjoy high-quality audio (up to 320 kbps).
    • -
    -

    The cons of using AfroCharts to download Live Your Life MP3 are:

    -
      -
    • You need a premium account ($4.99 per month) to download music offline.
    • -
    • You can only download up to 5,000 songs per device.
    • -
    • You can only play the downloaded songs within the AfroCharts app.
    • -
    • You may not find some songs that are not related to African music on AfroCharts.
    • -
    -

    How to download Live Your Life MP3 from AfroCharts

    -

    To download Live Your Life MP3 from AfroCharts, you need to follow these steps:

    -
      -
    1. Open the AfroCharts app on your device and log in with your premium account.
    2. -
    3. Search for Live Your Life by T.I. and Rihanna and tap on it.
    4. -
    5. Tap on the three dots icon on the bottom right corner and select Download.
    6. -
    7. Wait for the song to download and check the green check icon next to it.
    8. -
    9. Enjoy listening to Live Your Life offline within the AfroCharts app.
    10. -
    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have shown you how to download Live Your Life MP3 from three different sources: Spotify, Last.fm, and AfroCharts. We have also compared the pros and cons of each option and given you some tips on how to enjoy the song better. Live Your Life is a hit song by T.I. and Rihanna that has a catchy melody, a positive message, and a memorable music video. It is one of the best-selling singles of all time and a motivational anthem for many people.

    -

    Call to action

    -

    Now that you know how to download Live Your Life MP3, you can choose the option that suits you best and start listening to the song offline. You can also share the song with your friends and family and inspire them to live their lives to the fullest. Remember, you only have one life, so make it count. As T.I. and Rihanna say, "Just live your life, don't let them tell you what to do."

    -

    FAQs

    -

    Here are some frequently asked questions about Live Your Life MP3:

    -
      -
    1. Q: How long is Live Your Life?
    2. -
    3. A: Live Your Life is 5 minutes and 38 seconds long.
    4. -
    5. Q: What genre is Live Your Life?
    6. -
    7. A: Live Your Life is a hip hop song with elements of R&B and pop.
    8. -
    9. Q: Who wrote Live Your Life?
    10. -
    11. A: Live Your Life was written by T.I., Rihanna, Dan Balan, Just Blaze, Makeba Riddick, and James Harris III.
    12. -
    13. Q: What awards did Live Your Life win?
    14. -
    15. A: Live Your Life won the Grammy Award for Best Rap/Sung Collaboration in 2009.
    16. -
    17. Q: Where can I watch the music video of Live Your Life?
    18. -
    19. A: You can watch the music video of Live Your Life on YouTube here.
    20. -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md b/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md deleted file mode 100644 index 41fb752e200e3c0b64b147516eb3cde2429b8a33..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dream AI How to Use the MOD APK to Create Amazing Art - Download Now.md +++ /dev/null @@ -1,149 +0,0 @@ -
    -

    Download Dream AI Mod APK: A Guide to Unlocking Your Creative Potential

    -

    Do you want to unleash your imagination and create stunning artworks with just a few taps on your phone? Do you want to explore different styles, themes, and effects without any limitations? If yes, then you should download Dream AI Mod APK, a powerful and innovative app that lets you transform your photos into amazing artworks using artificial intelligence.

    -

    What is Dream AI?

    -

    Dream AI is an app that uses advanced neural networks and deep learning algorithms to generate realistic and artistic images from your photos. You can choose from hundreds of filters, stickers, backgrounds, and frames to create your own unique masterpiece. You can also mix and match different elements to create new combinations and effects. Whether you want to make your photos look like paintings, sketches, cartoons, or anything else, Dream AI can help you achieve it.

    -

    download dream ai mod apk


    Download Zip ⚙⚙⚙ https://urllie.com/2uNygJ



    -

    Features of Dream AI

    -

    Some of the features of Dream AI are:

    -
      -
    • Easy and intuitive interface: You can easily navigate through the app and access all the features with just a few taps.
    • -
    • High-quality results: The app uses high-resolution images and advanced algorithms to ensure that your artworks are clear, detailed, and realistic.
    • -
    • Fast and smooth performance: The app runs smoothly on most devices and does not consume much battery or storage space.
    • -
    • Offline mode: You can use the app without an internet connection and save your artworks on your device.
    • -
    • No watermark: The app does not add any watermark or logo to your artworks, so you can enjoy them without any distraction.
    • -
    • No ads: The app does not show any annoying ads or pop-ups that might interrupt your creative process.
    • -
    -

    Benefits of Dream AI

    -

    Some of the benefits of using Dream AI are:

    -
      -
    • You can express yourself creatively and have fun with your photos.
    • -
    • You can learn new skills and techniques and improve your artistic abilities.
    • -
    • You can impress your friends and family with your amazing artworks and share them on social media.
    • -
    • You can relax and relieve stress by playing with different filters and effects.
    • -
    • You can discover new styles and genres of art and get inspired by them.
    • -
    -

    Why Download Dream AI Mod APK?

    -

    If you are wondering why you should download Dream AI Mod APK instead of the original version, here are some reasons:

    -
      -
    • You can access all the premium features for free, such as unlimited filters, stickers, backgrounds, frames, etc.
    • -
    • You can unlock all the hidden features that are not available in the original version, such as custom filters, advanced settings, etc.
    • -
    • You can enjoy the app without any restrictions or limitations, such as time limit, image size limit, etc.
    • -
    -

    How to Download Dream AI Mod APK

    -

    If you want to download Dream AI Mod APK, here are the steps you need to follow:

    -
      -
    1. Click the Download button at the top of this page to download the Dream AI Mod APK file.
    2. -
    3. Save the file in your device's download folder or any other location you prefer.
    4. -
    -

    How to Install Dream AI Mod APK

    -

    If you want to install Dream AI Mod APK on your device, here are the steps you need to follow (an integrity check you can run on the downloaded file is sketched after the list):

    -
      -
    1. Go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the official app store.
    2. -
    3. Locate the downloaded Dream AI Mod APK file on your device and tap on it to open it.
    4. -
    5. Tap on the Install button and wait for the installation process to complete.
    6. -
    7. Once the installation is done, you can launch the app and start using it.
    8. -
    -
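    As noted above, modified APK files usually come from unofficial mirrors, so it is worth checking that the download is at least an intact package before step 1. The sketch below treats the APK as the ZIP archive it technically is; the file name is a placeholder, and passing the check only means the archive is not corrupted, not that the app is safe to install.

# Basic integrity check of a downloaded APK (an APK is a ZIP archive).
# "dream_ai_mod.apk" is a placeholder, not the real file name.
import zipfile

apk_path = "dream_ai_mod.apk"

with zipfile.ZipFile(apk_path) as apk:
    bad_entry = apk.testzip()   # returns the first corrupt member, or None
    names = apk.namelist()

print("Corrupt entry:", bad_entry or "none found")
print("Has AndroidManifest.xml:", "AndroidManifest.xml" in names)
print("Has a classes.dex:", any(n.startswith("classes") and n.endswith(".dex") for n in names))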

    How to Use Dream AI Mod APK

    -

    If you want to use Dream AI Mod APK to create amazing artworks, here are the steps you need to follow:

    -
      -
    1. Open the app and grant the necessary permissions, such as camera, storage, etc.
    2. -
    3. Select a photo from your gallery or take a new one with your camera.
    4. -
    5. Choose a filter from the list of categories, such as Art, Sketch, Cartoon, etc. You can also use the search bar to find a specific filter.
    6. -
    7. Adjust the intensity and other parameters of the filter according to your preference.
    8. -
    9. Add stickers, backgrounds, frames, and other elements to enhance your artwork.
    10. -
    11. Save your artwork on your device or share it with others.
    12. -
    -

    Tips and Tricks for Dream AI Mod APK

    -

    If you want to get the most out of Dream AI Mod APK, here are some tips and tricks you can try:

    -

    download dream ai mod apk free
    -download dream ai mod apk premium
    -download dream ai mod apk unlocked
    -download dream ai mod apk latest version
    -download dream ai mod apk for android
    -download dream ai mod apk no ads
    -download dream ai mod apk full features
    -download dream ai mod apk from jojoy
    -download dream ai mod apk without root
    -download dream ai mod apk 2023
    -how to download dream ai mod apk
    -where to download dream ai mod apk
    -best site to download dream ai mod apk
    -download dream ai mod apk for pc
    -download dream ai mod apk for ios
    -download dream ai mod apk for windows
    -download dream ai mod apk for mac
    -download dream ai mod apk for linux
    -download dream ai mod apk online
    -download dream ai mod apk offline
    -download dream ai mod apk with obb
    -download dream ai mod apk with data
    -download dream ai mod apk with unlimited money
    -download dream ai mod apk with all filters
    -download dream ai mod apk with pro features
    -is it safe to download dream ai mod apk
    -is it legal to download dream ai mod apk
    -is it possible to download dream ai mod apk
    -benefits of downloading dream ai mod apk
    -drawbacks of downloading dream ai mod apk
    -alternatives to downloading dream ai mod apk
    -reviews of downloading dream ai mod apk
    -ratings of downloading dream ai mod apk
    -tips for downloading dream ai mod apk
    -tricks for downloading dream ai mod apk
    -steps for downloading dream ai mod apk
    -guide for downloading dream ai mod apk
    -tutorial for downloading dream ai mod apk
    -video for downloading dream ai mod apk
    -blog for downloading dream ai mod apk
    -website for downloading dream ai mod apk
    -link for downloading dream ai mod apk
    -source for downloading dream ai mod apk
    -mirror for downloading dream ai mod apk
    -torrent for downloading dream ai mod apk
    -file for downloading dream ai mod apk
    -folder for downloading dream ai mod apk
    -zip for downloading dream ai mod apk
    -rar for downloading dream ai mod apk

    -

    How to Create Amazing Artworks with Dream AI Mod APK

    -

    Some of the ways you can create amazing artworks with Dream AI Mod APK are:

    -
      -
    • Experiment with different filters and effects and see how they change your photo.
    • -
    • Mix and match different elements from different categories and create your own unique style.
    • -
    • Use the custom filter option to create your own filter from scratch or modify an existing one.
    • -
    • Use the eraser tool to remove unwanted parts of your photo or filter.
    • -
    • Use the crop tool to adjust the size and shape of your photo or filter.
    • -
    -

    How to Share Your Artworks with Dream AI Mod APK

    -

    Some of the ways you can share your artworks with Dream AI Mod APK are:

    -
      -
    • Use the share button to send your artwork to your friends and family via social media, email, or other apps.
    • -
    • Use the save button to save your artwork on your device or cloud storage.
    • -
    • Use the print button to print your artwork on paper or canvas.
    • -
    • Use the download button to download your artwork as an image file or a video file.
    • -
    -

    How to Customize Your Settings with Dream AI Mod APK

    -

    Some of the ways you can customize your settings with Dream AI Mod APK are:

    -
      -
    • Use the settings button to access various options, such as language, theme, quality, etc.
    • -
    • Use the feedback button to rate the app, report bugs, or suggest improvements.
    • -
    • Use the help button to get more information about the app and its features.
    • -
    -

    Conclusion

    -

    Dream AI Mod APK is a great app for anyone who loves art and creativity. It allows you to transform your photos into stunning artworks using artificial intelligence. You can choose from hundreds of filters, stickers, backgrounds, frames, and other elements to create your own unique masterpiece. You can also access all the premium features for free and enjoy the app without any restrictions or limitations. Download Dream AI Mod APK today and unleash your imagination!

    -

    FAQs

    -

    Here are some frequently asked questions about Dream AI Mod APK:

    -
      -
    1. Is Dream AI Mod APK safe?
    2. -

      Dream AI Mod APK is safe to use as long as you download it from a trusted source. It does not contain any viruses or malware that might harm your device or data. However, you should always be careful when installing apps from unknown sources and check their permissions before granting them.

      -
    3. Is Dream AI Mod APK legal?
    4. -

      Dream AI Mod APK is legal to use as long as you do not use it for any illegal or unethical purposes. It is a modified version of the original app that provides some extra features and benefits. However, it is not affiliated with or endorsed by the official developers of Dream AI. Therefore, you should use it at your own risk and responsibility.

      -
    5. What are the requirements for Dream AI Mod APK?
    6. -

      Dream AI Mod APK requires an Android device that runs on Android 4.4 or higher. It also requires a minimum of 100 MB of free storage space and 2 GB of RAM. It works best on devices that have a good camera and a fast processor.

      -
    7. How can I update Dream AI Mod APK?
    8. -

      Dream AI Mod APK does not have an automatic update feature, so you will have to manually download and install the latest version from the same source you downloaded it from. You can also check this page for any updates or news about Dream AI Mod APK.

      -
    9. How can I contact the developers of Dream AI Mod APK?
    10. -

      Dream AI Mod APK is not developed by the official developers of Dream AI, so you cannot contact them directly. However, you can contact the modders who created Dream AI Mod APK through their website or social media accounts. You can also leave a comment on this page and we will try to answer your questions or concerns.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md b/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md deleted file mode 100644 index 47486fb1ac65050feb243d2200527ce36b816c05..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Experience Premium Gameplay with Free Fire MAX 1.94.1 APK - Latest Version.md +++ /dev/null @@ -1,150 +0,0 @@ -
    -

    Free Fire MAX 1.94.1 APK Download: Everything You Need to Know

    -

    If you are a fan of battle royale games, you might have heard of Free Fire, one of the most popular and downloaded games in the genre. But did you know that there is another version of the game that offers a more premium and immersive experience? It's called Free Fire MAX, and it's designed exclusively for players who want to enjoy the best graphics, effects, and gameplay in a battle royale.

    -

    In this article, we will tell you everything you need to know about Free Fire MAX, including how to download and install the latest version (1.94.1) of the game on your Android device, what's new in this update, how to play with other Free Fire players, and why you should give it a try.

    -

    free fire max 1.94.1 apk download


    Download File https://urllie.com/2uNzC8



    -

    What is Free Fire MAX?

    -

    Free Fire MAX is a standalone application that runs on the same server as the original Free Fire. It is not a mod or a hack, but an official product from Garena, the developer and publisher of the game. Free Fire MAX is compatible with all Free Fire accounts, items, and events, so you don't have to worry about losing your progress or missing out on anything.

    -

    The main difference between Free Fire MAX and Free Fire is that Free Fire MAX offers a more enhanced and realistic graphics quality, with Ultra HD resolutions and breathtaking effects. You can also customize your graphics settings according to your preference and device performance. In addition, Free Fire MAX has some exclusive features and content that are not available in the original version, such as special lobby themes, emotes, skins, and more.

    -

    Free Fire MAX is not a replacement for Free Fire, but an alternative option for players who want to experience a different level of combat in a battle royale. You can choose to play either version depending on your mood and device capability.

    -

    How to Download and Install Free Fire MAX 1.94.1 APK?

    -

    Requirements

    -

    Before you download and install Free Fire MAX on your Android device, you need to make sure that your device meets the minimum or recommended specifications for running the game smoothly. Here are the requirements for Free Fire MAX:

    Minimum: Android 4.4 or higher, 2 GB RAM or higher, dual core 1.2 GHz CPU or higher, 1.5 GB storage or higher.
    Recommended: Android 8 or higher, 4 GB RAM or higher, octa core 2 GHz CPU or higher, 2.5 GB storage or higher.
    -

    If your device does not meet the minimum requirements, you may not be able to download or install Free Fire MAX, or you may encounter some performance issues while playing the game. If your device meets the recommended requirements, you can enjoy the game at its full potential.
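    If you have a computer with ADB (Android Debug Bridge) installed and USB debugging enabled on your phone, you can read these numbers off the device instead of guessing. This is only a sketch under those assumptions; the thresholds simply mirror the minimum figures from the table above (Android 4.4 corresponds to API level 19).

# Rough device check over ADB; assumes `adb` is on PATH and a device is connected.
import re
import subprocess

def adb(*args: str) -> str:
    """Run an adb command and return its trimmed stdout."""
    return subprocess.run(["adb", *args], capture_output=True, text=True, check=True).stdout.strip()

android_version = adb("shell", "getprop", "ro.build.version.release")  # e.g. "11"
sdk_level = int(adb("shell", "getprop", "ro.build.version.sdk"))       # e.g. 30

# MemTotal in /proc/meminfo is reported in kB.
meminfo = adb("shell", "cat", "/proc/meminfo")
mem_total_kb = int(re.search(r"MemTotal:\s+(\d+)\s+kB", meminfo).group(1))
ram_gb = mem_total_kb / (1024 * 1024)

print(f"Android {android_version} (API {sdk_level}), about {ram_gb:.1f} GB RAM")
print("Meets minimum Android version:", sdk_level >= 19)
print("Meets minimum RAM:", ram_gb >= 2)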

    -

    free fire max 1.94.1 apk download for android
    -free fire max 1.94.1 apk download latest version
    -free fire max 1.94.1 apk download link
    -free fire max 1.94.1 apk download obb
    -free fire max 1.94.1 apk download uptodown
    -free fire max 1.94.1 apk download mod
    -free fire max 1.94.1 apk download hack
    -free fire max 1.94.1 apk download unlimited diamonds
    -free fire max 1.94.1 apk download mediafıre
    -free fire max 1.94.1 apk download highly compressed
    -free fire max 1.94.1 apk download for pc
    -free fire max 1.94.1 apk download for ios
    -free fire max 1.94.1 apk download for laptop
    -free fire max 1.94.1 apk download for windows 10
    -free fire max 1.94.1 apk download for mac
    -free fire max 1.94.1 apk download offline
    -free fire max 1.94.1 apk download online
    -free fire max 1.94.1 apk download no verification
    -free fire max 1.94.1 apk download without update
    -free fire max 1.94.1 apk download new update
    -free fire max 1.94.1 apk download full version
    -free fire max 1.94.1 apk download beta version
    -free fire max 1.94.1 apk download old version
    -free fire max 1.94.1 apk download original version
    -free fire max 1.94.1 apk download google play store
    -free fire max 1.94.1 apk download from apkpure
    -free fire max 1.94.1 apk download from apkmirror
    -free fire max 1.94.1 apk download from apktada
    -free fire max 1.94.1 apk download from apkmody
    -free fire max 1.94.1 apk download from apknite
    -free fire max 1.94.1 apk download by garena
    -free fire max 1.94.1 apk download by dts
    -free fire max 1.94.1 apk download by rexdl
    -free fire max 1.94.1 apk download by revdl
    -free fire max 1.94.1 apk download by androidoyun club
    -how to install free fire max 1.94..apk on android device?
    -how to update free fire max to version .apk on android device?
    -how to play free fire max .apk on pc using emulator?
    -how to fix error while downloading or installing .apk file of free fire?
    -how to get unlimited diamonds in .apk file of

    -

    Steps

    -

    Once you have checked your device specifications, you can follow these steps to download and install Free Fire MAX 1.94.1 APK on your Android device (a checksum check for the downloaded file is sketched after the list):

    -
      -
    1. Go to a trusted and reliable source to download the APK file. You can use this link to download the latest version of Free Fire MAX from APKPure, one of the most popular and safe websites for downloading APK files.
    2. -
    3. After downloading the APK file, locate it in your device's file manager and tap on it to start the installation process. You may need to enable the "Unknown sources" option in your device's settings to allow the installation of apps from sources other than the Google Play Store.
    4. -
    5. Follow the on-screen instructions to complete the installation process. You may need to grant some permissions to the app to access your device's features and data.
    6. -
    7. Once the installation is done, you can launch the app and log in with your existing Free Fire account or create a new one if you don't have one.
    8. -
    9. Enjoy playing Free Fire MAX on your Android device!
    10. -
    -
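    As mentioned before the list, if the site you download from publishes a checksum for the APK, comparing it locally is a cheap safeguard before you install anything. The sketch below uses only the Python standard library; the file name and the expected hash are placeholders you would replace with your own values.

# Compare the SHA-256 checksum of the downloaded APK against a published value.
import hashlib

apk_path = "freefire_max_1.94.1.apk"          # placeholder file name
expected = "paste-the-published-sha256-here"  # placeholder value

sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print("SHA-256:", digest)
print("Matches published hash:", digest == expected.lower())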

    Tips and Tricks

    -

    To make sure that you have a smooth and enjoyable gaming experience with Free Fire MAX, here are some tips and tricks that you can use:

    -
      -
    • Make sure that you have a stable and fast internet connection while playing the game. You can use a Wi-Fi network or a mobile data plan, but avoid using public or shared networks that may have low speed or high latency.
    • -
    • Close any background apps or processes that may consume your device's memory or battery while playing the game. You can also use a game booster app to optimize your device's performance and reduce lag or stuttering.
    • -
    • Adjust your graphics settings according to your device's capability and your personal preference. You can choose from low, medium, high, or ultra settings, depending on how much detail and quality you want to see in the game. You can also enable or disable some features such as shadows, anti-aliasing, bloom, etc.
    • -
    • If you encounter any problems or errors while downloading, installing, or playing the game, you can try some common solutions such as clearing the app's cache and data, reinstalling the app, updating your device's software, etc. You can also contact Garena's customer support team for further assistance.
    • -
    -

    What's New in Free Fire MAX 1.94.1?

    -

    The latest version of Free Fire MAX (1.94.1) was released on June 9, 2023, and it brings some new features, changes, and improvements to the game. Here are some of the highlights of this update:

    -
      -
    • A new map called Bermuda Remastered has been added to the game. This map is a revamped version of the classic Bermuda map, with more details, locations, and surprises. You can explore places such as Academy, Aden's Creek, Fisherman Creek, Nurek Dam, etc.
    • -
    • A new mode called Clash Squad Ranked Season 7 has been introduced to the game. This mode is a competitive mode where you can team up with other players and fight against another team in a best-of-seven series of rounds. You can earn points and rank up by winning matches and completing missions.
    • -
    • A new character called Maro has been added to the game. Maro is a falconer who loves nature and animals. His special skill is called Falcon Fervor, which increases his damage over distance and against marked enemies.
    • -
    • A new weapon called Kord has been added to the game. Kord is a light machine gun that has a high rate of fire and a large magazine capacity. It also has a special mode called Machine Gun Mode, which increases its damage and accuracy when prone or crouching.
    • -
    • A new pet called Dr. Beanie has been added to the game. Dr. Beanie is a cute and smart hamster who wears a lab coat and glasses. His special skill is called Smooth Gloo, which reduces the cooldown of gloo walls by 20%.
    • -
    • A new feature called Dynamic Lighting has been added to the game. This feature enhances the lighting effects in the game, making it more realistic and immersive. You can see how the light changes according to the time of day, weather conditions, etc.
    • -
    -

    How to Play Free Fire MAX with Free Fire Players?

    -

    One of the best features of Free Fire MAX is that it allows you to play with other players who are using the original Free Fire version. This means that you can join your friends and squad up with them, regardless of which version of the game you are using. You can also participate in the same events, modes, and matches as the Free Fire players.

    -

    To play Free Fire MAX with Free Fire players, you need to use a feature called Firelink. Firelink is a feature that connects your Free Fire account with your Free Fire MAX account, and lets you switch between the two versions seamlessly. You can also use Firelink to sync your game data, settings, and preferences across both versions.

    -

    To use Firelink, you need to follow these steps:

    -
      -
    1. Open Free Fire MAX and tap on the Firelink icon on the top right corner of the screen.
    2. -
    3. Select the option to link your Free Fire account with your Free Fire MAX account. You can choose to link your account via Facebook, Google, VK, or Huawei.
    4. -
    5. Log in with your credentials and confirm the linking process.
    6. -
    7. Once your accounts are linked, you can see a green check mark on the Firelink icon. You can also see your Free Fire nickname and ID on the top left corner of the screen.
    8. -
    9. Now you can play Free Fire MAX with Free Fire players. You can invite them to your lobby, join their lobby, or match with them randomly. You can also see their version of the game on their profile.
    10. -
    -

    If you want to switch back to the original Free Fire version, you can tap on the Firelink icon again and select the option to switch versions. You can do this anytime without losing your progress or data.

    -

    Why You Should Play Free Fire MAX?

    -

    Advantages

    -

    There are many reasons why you should play Free Fire MAX, especially if you are looking for a more enhanced and immersive battle royale experience. Here are some of the advantages of playing Free Fire MAX:

    -
      -
    • You can enjoy a more realistic and stunning graphics quality, with Ultra HD resolutions and amazing effects. You can see every detail of the environment, the characters, and the weapons, making you feel like you are in a real battlefield.
    • -
    • You can customize your graphics settings according to your preference and device performance. You can choose from low, medium, high, or ultra settings, depending on how much detail and quality you want to see in the game. You can also enable or disable some features such as shadows, anti-aliasing, bloom, etc.
    • -
    • You can access some exclusive features and content that are not available in the original version, such as special lobby themes, emotes, skins, and more. You can also get some rewards and benefits for playing Free Fire MAX, such as coupons, diamonds, and crates.
    • -
    • You can play with other players who are using either version of the game, thanks to the Firelink feature. You can join your friends and squad up with them, regardless of which version of the game you are using. You can also participate in the same events, modes, and matches as the Free Fire players.
    • -
    -

    Disadvantages

    -

    However, playing Free Fire MAX also has some drawbacks that you should be aware of before downloading and installing the game. Here are some of the disadvantages of playing Free Fire MAX:

    -
      -
    • You need a higher device specification to run the game smoothly. The minimum requirement for Free Fire MAX is Android 4.4 or higher, 2 GB RAM or higher, dual core 1.2 GHz or higher CPU, and 1.5 GB storage or higher. If your device does not meet these requirements, you may not be able to download or install Free Fire MAX, or you may encounter some performance issues while playing the game.
    • -
    • You need a larger file size to download and install the game. The APK file size for Free Fire MAX is around 900 MB, which is much larger than the original Free Fire version (around 700 MB). You may need to free up some space on your device or use an external storage device to accommodate the game.
    • -
    • You may face some compatibility issues with some devices or regions. Some devices or regions may not support Free Fire MAX due to various reasons such as hardware limitations, software restrictions, network regulations, etc. You may need to check the compatibility of your device or region before downloading and installing Free Fire MAX, or you may need to use a VPN or other methods to bypass the restrictions.
    • -
    -

    Conclusion

    -

    Free Fire MAX is a great option for players who want to experience a more enhanced and immersive battle royale game. It offers a more realistic and stunning graphics quality, a more customizable graphics settings, and some exclusive features and content that are not available in the original version. It also allows you to play with other players who are using either version of the game, thanks to the Firelink feature.

    -

    However, Free Fire MAX also has some drawbacks that you should be aware of before downloading and installing the game. It requires a higher device specification, a larger file size, and a compatible device or region to run the game smoothly. You may also encounter some performance issues or errors while playing the game, which you can try to solve by following some tips and tricks.

    -

    If you are interested in trying Free Fire MAX, you can download and install the latest version (1.94.1) of the game on your Android device by following the steps we have provided in this article. You can also check out the latest updates, changes, and improvements in the game by reading this article.

    -

    We hope that this article has helped you learn everything you need to know about Free Fire MAX. If you have any questions or feedback, feel free to leave a comment below. Happy gaming!

    -

    FAQs

    -

    Here are some of the frequently asked questions and their answers about Free Fire MAX:

    -
      -
    1. Is Free Fire MAX free to play?
    2. -

      Yes, Free Fire MAX is free to play, just like the original Free Fire version. You don't have to pay anything to download or install the game, or to access its features and content. However, you can choose to purchase some in-game items or services with real money if you want to enhance your gaming experience.

      -
    3. Can I play Free Fire MAX on PC?
    4. -

      Yes, you can play Free Fire MAX on PC by using an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. You can use any Android emulator that supports Free Fire MAX, such as BlueStacks, LDPlayer, NoxPlayer, etc. You just need to download and install the emulator on your PC, then download and install Free Fire MAX on the emulator.

      -
    5. Can I transfer my data from Free Fire to Free Fire MAX?
    6. -

      Yes, you can transfer your data from Free Fire to Free Fire MAX by using the Firelink feature. Firelink is a feature that connects your Free Fire account with your Free Fire MAX account, and lets you sync your game data, settings, and preferences across both versions. You just need to link your accounts by logging in with the same credentials on both versions.

      -
    7. Can I play both versions of the game on the same device?
    8. -

      Yes, you can play both versions of the game on the same device, as long as your device meets the requirements for both versions. You can switch between the two versions by using the Firelink feature. However, you cannot run both versions at the same time on the same device, as it may cause some conflicts or errors.

      -
    9. How can I update Free Fire MAX?
    10. -

      You can update Free Fire MAX by downloading and installing the latest version of the APK file from a trusted source. You can use this link to download the latest version of Free Fire MAX from APKPure. You just need to follow the same steps as downloading and installing the game for the first time.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/fcakyon/zero-shot-video-classification/README.md b/spaces/fcakyon/zero-shot-video-classification/README.md deleted file mode 100644 index b7c5e4880a879007c0a56405ce3fb58fc4372608..0000000000000000000000000000000000000000 --- a/spaces/fcakyon/zero-shot-video-classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Zero Shot Video Classification -emoji: 🔥 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: true -license: apache-2.0 -tags: -- making-demos ---- \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md deleted file mode 100644 index 85ea6cfab2582931dd9c507f02ae2a7efce3f4ce..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/CarX Drift Racing 2 Money Mod APK Upgrade Your Cars and Customize Your Drifts.md +++ /dev/null @@ -1,76 +0,0 @@ -
    -

    CarX Drift Racing 2 APK Money: How to Download and Play the Best Drifting Game on Android

    -

    If you are a fan of racing games, especially drifting games, you should not miss CarX Drift Racing 2. This is one of the most realistic and immersive drifting games on Android, with stunning graphics, physics, and sound effects. You can enjoy the thrill of drifting on various tracks, with different cars and customization options. You can also compete with other players online, or join a club and challenge other teams.

    -

    But what if you want to get unlimited money in CarX Drift Racing 2, so you can buy and upgrade any car you want, without spending real money? Well, there is a way to do that, by downloading the CarX Drift Racing 2 APK Money mod. In this article, we will show you what is CarX Drift Racing 2, how to download and install the APK Money mod, how to play the game, and some tips and tricks to help you become a drifting master.

    -

    carx drift racing 2 apk money


    Download Zip https://gohhs.com/2uPppV



    -

    What is CarX Drift Racing 2?

    -

    CarX Drift Racing 2 is a sequel to the popular CarX Drift Racing game, developed by CarX Technologies. It is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car, causing it to slide sideways. Drifting requires skill and precision, as well as a good sense of speed and timing.

    -

    Features of CarX Drift Racing 2

    -

    Some of the features that make CarX Drift Racing 2 stand out from other racing games are:

    -
      -
    • Realistic physics and car behavior, based on the CarX Engine technology.
    • -
    • High-quality graphics and sound effects, with dynamic day-night cycle and weather conditions.
    • -
    • Over 100 cars from different brands and categories, such as sports cars, muscle cars, supercars, etc.
    • -
    • Thousands of customization options, including paint, vinyls, wheels, tires, suspension, engine, etc.
    • -
    • A variety of tracks and locations, from city streets to desert roads.
    • -
    • Multiple game modes and events, such as solo mode, online mode, career mode, club mode, etc.
    • -
    • A leaderboard system and a rating system, where you can compete with other players and earn rewards.
    • -
    • A replay system and a photo mode, where you can watch your best drifts and share them with others.
    • -
    -

    How to download CarX Drift Racing 2 APK Money

    -

    If you want to get unlimited money in CarX Drift Racing 2, you need to download the APK Money mod. This is a modified version of the original game file that gives you access to unlimited resources. Here are the steps to download and install the APK Money mod (an ADB-based alternative for the install and OBB-copy steps is sketched after the list):

    -
      -
    1. Go to this link and download the APK file and the OBB file.
    2. -
    3. Go to your device settings and enable the installation of apps from unknown sources.
    4. -
    5. Go to your file manager and locate the downloaded files. Tap on the APK file and install it.
    6. -
    7. Copy the OBB file to the Android/OBB/com.carxtech.carxdr2 folder. If there is no such folder, create one.
    8. -
    9. Launch the game and enjoy unlimited money.
    10. -
    -
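    As mentioned above, the install and OBB-copy steps can also be driven from a computer over ADB if you prefer not to juggle files on the phone. This is only a sketch under a few assumptions: `adb` is installed and USB debugging is enabled, the file names are placeholders, and the obb folder name comes straight from the steps above (note that the canonical on-device path uses lowercase "obb").

# Install the APK and push the OBB file over ADB instead of doing it on the phone.
# File names are placeholders; the obb folder name is the one given in the steps above.
import subprocess

APK = "carx_drift_racing_2_mod.apk"   # placeholder
OBB = "main.obb"                      # placeholder
OBB_DIR = "/sdcard/Android/obb/com.carxtech.carxdr2"

def run(*cmd: str) -> None:
    """Print and run a command, stopping on the first failure."""
    print("$", " ".join(cmd))
    subprocess.run(cmd, check=True)

run("adb", "install", "-r", APK)              # -r keeps existing app data if reinstalling
run("adb", "shell", "mkdir", "-p", OBB_DIR)
run("adb", "push", OBB, OBB_DIR + "/")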

    How to play CarX Drift Racing 2

    -

    Now that you have downloaded and installed the APK Money mod, you can start playing CarX Drift Racing 2. Here are some basic steps to help you get started:

    -

    Choose your car and customize it

    -

    The first thing you need to do is to choose your car. You can browse through the garage and select from over 100 cars, each with different stats and performance. You can also use the money you have to buy new cars or upgrade your existing ones.

    -

    Once you have chosen your car, you can customize it to your liking. You can change the paint, vinyls, wheels, tires, suspension, engine, and more. You can also tune your car's settings, such as the steering angle, the brake force, the camber angle, etc. You can save your customizations as presets and switch between them easily.

    -

    Learn the basics of drifting

    -

    The next thing you need to do is to learn how to drift. Drifting is not easy, but it is very fun and rewarding. You need to master the balance between the throttle, the brake, the steering, and the handbrake. You also need to know when to initiate, maintain, and exit a drift.

    -

    [CarX Drift Racing 2 MOD APK 1.16.0 (Unlimited Money) Download]
    -[CarX Drift Racing 2 Mod Apk v1.16.0 (Unlimited Money) - ApkModPro]
    -[CarX Drift Racing 2 Mod APK 1.16.0 - Download CarX Drift Racing ...]
    -[CarX Drift Racing 2 MOD APK v1.16.0 (Unlimited Money) - APKMODY]
    -[CarX Drift Racing 2 Mod Apk v1.16.0 (Unlimited Money) - ApkPalace]

    -

    The game has a tutorial mode that teaches you the basics of drifting. You can also practice on different tracks and modes, such as training mode, freestyle mode, or time attack mode. You can also watch replays of other players or yourself, and learn from their mistakes or techniques.

    -

    Compete in different modes and events

    -

    The last thing you need to do is to compete in different modes and events. The game has a lot of options for you to challenge yourself and others. You can play in solo mode, where you can race against AI opponents or ghost cars. You can also play in online mode, where you can race against real players from around the world.

    -

    The game also has a career mode, where you can progress through different stages and levels, and earn rewards and achievements. You can also join a club or create your own club, and compete with other clubs in club wars or club seasons. You can also participate in special events and tournaments, where you can win exclusive prizes and cars.

    -

    Tips and tricks for CarX Drift Racing 2

    -

    To help you improve your drifting skills and enjoy the game more, here are some tips and tricks for CarX Drift Racing 2:

    -

    Adjust your settings and controls

    -

    One of the most important things to do is to adjust your settings and controls according to your preference and device. You can choose from different control schemes, such as tilt, buttons, or steering wheel. You can also adjust the sensitivity and feedback of each control option. You can also change the camera angle and the sound volume.

    -

    Use the handbrake and nitro wisely

    -

    Another important thing to do is to use the handbrake and nitro wisely. The handbrake is useful for initiating or extending a drift, but it also reduces your speed and stability. The nitro is useful for boosting your speed and power, but it also consumes your fuel and overheats your engine. You need to find the right balance between using them and saving them for the right moments.

    -

    Practice and improve your skills

    -

    The last important thing to do is to practice and improve your skills. Drifting is not something that you can master overnight. It takes time and effort to learn how to drift smoothly and consistently. You need to practice on different tracks and cars, and learn how they behave and react. You also need to improve your timing, accuracy, angle, speed, and style.

    -

    Conclusion

    -

    CarX Drift Racing 2 is a great game for anyone who loves racing and drifting. It has realistic physics and graphics, a huge variety of cars and tracks, a lot of game modes and events, and a lot of fun and excitement. If you want to get unlimited money in CarX Drift Racing 2, you can download the APK Money mod from [this link] and follow the instructions above.

    -

    We hope this article has helped you learn more about CarX Drift Racing 2 APK Money mod. If you have any questions or feedback, please let us know in the comments below. Happy drifting!

    -

    FAQs

    -
      -
    • Is CarX Drift Racing 2 APK Money mod safe?
    • -

      Yes, CarX Drift Racing 2 APK Money mod is safe to use, as long as you download it from a trusted source and follow the installation steps correctly. However, you should be aware that using the mod may affect your game performance or compatibility, and may violate the game's terms of service. Use it at your own risk.

      -
    • How can I get more money in CarX Drift Racing 2 without using the mod?
    • -

      If you don't want to use the APK Money mod, you can still get more money in CarX Drift Racing 2 by playing the game regularly and completing various tasks and challenges. You can also watch ads or make in-app purchases to get more money.

      -
    • What are the best cars for drifting in CarX Drift Racing 2?
    • -

      The best cars for drifting in CarX Drift Racing 2 depend on your personal preference and style. However, some of the most popular and recommended cars for drifting are:

      -
        -
      • CarX E30 - A classic BMW model with good handling and balance.
      • -
      • CarX RX8 - A sporty Mazda model with high power and speed.
      • -
      • CarX S15 - A sleek Nissan model with great acceleration and stability.
      • -
      • CarX Mustang - A powerful Ford model with a lot of torque and drift potential.
      • -
      • CarX Supra - A legendary Toyota model with a high-performance engine and design.
      • -
      -
    • How can I join or create a club in CarX Drift Racing 2?
    • -

      To join or create a club in CarX Drift Racing 2, you need to go to the club menu and tap on the join or create button. You can search for existing clubs by name, rating, or region, or you can create your own club by choosing a name, a logo, and a description. You can also invite other players to join your club or accept invitations from other clubs.

      -
    • How can I share my replays or photos in CarX Drift Racing 2?
    • -

      To share your replays or photos in CarX Drift Racing 2, you need to go to the replay or photo mode and tap on the share button. You can choose from different options, such as saving to your device, uploading to YouTube, or sharing on social media. You can also edit your replays or photos before sharing them.

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh b/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh deleted file mode 100644 index 8e2007ee6210f45e6f904ccecaad66eeff5e59ec..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/AnimateDiff-Image-Init/download_bashscripts/0-MotionModule.sh +++ /dev/null @@ -1,2 +0,0 @@ -gdown 1RqkQuGPaCO5sGZ6V6KZ-jUWmsRu48Kdq -O models/Motion_Module/ -gdown 1ql0g_Ys4UCz2RnokYlBjyOYPbttbIpbu -O models/Motion_Module/ \ No newline at end of file diff --git a/spaces/fffiloni/ControlVideo/inference.sh b/spaces/fffiloni/ControlVideo/inference.sh deleted file mode 100644 index 2ff056ed060222067cdbc762dc087f71214b27fa..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/ControlVideo/inference.sh +++ /dev/null @@ -1,10 +0,0 @@ -python inference.py \ - --prompt "A striking mallard floats effortlessly on the sparkling pond." \ - --condition "depth" \ - --video_path "data/mallard-water.mp4" \ - --output_path "outputs/" \ - --video_length 15 \ - --smoother_steps 19 20 \ - --width 512 \ - --height 512 \ - # --is_long_video \ No newline at end of file diff --git a/spaces/fffiloni/stable-diffusion-color-sketch/app.py b/spaces/fffiloni/stable-diffusion-color-sketch/app.py deleted file mode 100644 index fe301a5863167c1398966294634f7ff48de0d888..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/stable-diffusion-color-sketch/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import gradio as gr -#import torch -#from torch import autocast // only for GPU - -from PIL import Image -import numpy as np -from io import BytesIO -import os -MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD') - -#from diffusers import StableDiffusionPipeline -from diffusers import StableDiffusionImg2ImgPipeline - -print("hello sylvain") - -YOUR_TOKEN=MY_SECRET_TOKEN - -device="cpu" - -#prompt_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN) -#prompt_pipe.to(device) - -img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN) -img_pipe.to(device) - -source_img = gr.Image(source="canvas", type="filepath", tool='color-sketch', label="new gradio color sketch") - -gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto") - -def resize(value,img): - #baseheight = value - img = Image.open(img) - #hpercent = (baseheight/float(img.size[1])) - #wsize = int((float(img.size[0])*float(hpercent))) - #img = img.resize((wsize,baseheight), Image.Resampling.LANCZOS) - img = img.resize((value,value), Image.Resampling.LANCZOS) - return img - - -def infer(source_img, prompt): - - source_image = resize(512, source_img) - source_image.save('source.png') - images_list = img_pipe([prompt] * 2, init_image=source_image, strength=0.75) - images = [] - safe_image = Image.open(r"unsafe.png") - for i, image in enumerate(images_list["sample"]): - if(images_list["nsfw_content_detected"][i]): - images.append(safe_image) - else: - images.append(image) - return images - -print("Great sylvain ! Everything is working fine !") - -title="Paint Stable Diffusion CPU" -description="Img-2-Img Stable Diffusion example using CPU and the beta color-sketch gradio tool.
      Warning: Slow process... ~5/10 min inference time. NSFW filter enabled." -custom_css = "style.css" - -gr.Interface(fn=infer, inputs=[source_img, "text"], outputs=gallery,title=title,description=description,css=custom_css).queue(max_size=100).launch(enable_queue=True) - diff --git a/spaces/fracapuano/AISandbox/mailing/mailing.py b/spaces/fracapuano/AISandbox/mailing/mailing.py deleted file mode 100644 index a40bb2a4f135e386270f48aaa8b5f8eb65b718c1..0000000000000000000000000000000000000000 --- a/spaces/fracapuano/AISandbox/mailing/mailing.py +++ /dev/null @@ -1,32 +0,0 @@ -import smtplib -from email.mime.text import MIMEText -from email.mime.multipart import MIMEMultipart -from typing import Text, Union, Iterable - - -def mailing_main(subject:Text, body:Text, to_address:Union[Text, Iterable[Text]]): - """Sends the email with the given subject and body to the given address (accepts also list of addresses).""" - # Mailing server configuration - smtp_server = 'smtp.gmail.com.' - smtp_port = 587 - sender_email = 'bainhackathon@gmail.com' - sender_password = 'onyghfffdbmurjdf' - - # This creates the actual email message - msg = MIMEMultipart() - msg['From'] = sender_email - msg['To'] = to_address - msg['Subject'] = subject - msg.attach(MIMEText(body, 'plain')) - - # Connects to SMTP server and then sends the actual email - try: - server = smtplib.SMTP(smtp_server, smtp_port) - server.starttls() - server.login(sender_email, sender_password) - server.sendmail(sender_email, to_address, msg.as_string()) - server.quit() - print("Email sent successfully!") - except Exception as e: - print("Error sending email:", e) - diff --git a/spaces/gagan3012/T5-Summarization/Makefile b/spaces/gagan3012/T5-Summarization/Makefile deleted file mode 100644 index e8352210d475daa7a1dfff65e41bc84f05cd3a28..0000000000000000000000000000000000000000 --- a/spaces/gagan3012/T5-Summarization/Makefile +++ /dev/null @@ -1,95 +0,0 @@ -.PHONY: clean dirs virtualenv lint requirements push pull run - -################################################################################# -# GLOBALS # -################################################################################# - -PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) -PYTHON_INTERPRETER = python - -################################################################################# -# COMMANDS # -################################################################################# - -## Create virtualenv. -## Activate with the command: -## source env/bin/activate -virtualenv: - virtualenv -p $(PYTHON_INTERPRETER) env - -## Install Python Dependencies. -## Make sure you activate the virtualenv first! -requirements: - $(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel - $(PYTHON_INTERPRETER) -m pip install -r requirements.txt - -## Create directories that are ignored by git but required for the project -dirs: - mkdir -p data/raw data/processed models - -## Delete all compiled Python files -clean: - find . -type f -name "*.py[co]" -delete - find . 
-type d -name "__pycache__" -delete - -## Lint using flake8 -lint: - flake8 src - -## Upload Data to default DVC remote -push: - dvc push -r origin - - -## Download Data from default DVC remote -pull: - dvc pull - -## run the DVC pipeline - recompute any modified outputs such as processed data or trained models -run: - dvc repro dvc.yaml - -################################################################################# -# PROJECT RULES # -################################################################################# - - - -################################################################################# -# Self Documenting Commands # -################################################################################# - -.DEFAULT_GOAL := help - -# Inspired by -# sed script explained: -# /^##/: -# * save line in hold space -# * purge line -# * Loop: -# * append newline + line to hold space -# * go to next line -# * if line starts with doc comment, strip comment character off and loop -# * remove target prerequisites -# * append hold space (+ newline) to line -# * replace newline plus comments by `---` -# * print line -# Separate expressions are necessary because labels cannot be delimited by -# semicolon; see -.PHONY: help -help: - @echo "$$(tput bold)Available rules:$$(tput sgr0)" - @echo - @sed -n -e "/^## / Missing" $Missing \ - | LC_ALL='C' sort --ignore-case \ - | awk -F '---' \ - -v ncol=$$(tput cols) \ - -v indent=19 \ - -v col_on="$$(tput setaf 6)" \ - -v col_off="$$(tput sgr0)" \ - 'Missing \ - printf "%s ", words[i]; \ - } \ - printf "\n"; \ - }' \ - | more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars') diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py deleted file mode 100644 index ea1111e1768b5f27e118bf7dbc0d9c70a7afd6d7..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/contour_expand.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['contour_expand']) - - -def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area, - kernel_num): - """Expand kernel contours so that foreground pixels are assigned into - instances. - - Arguments: - kernel_mask (np.array or Tensor): The instance kernel mask with - size hxw. - internal_kernel_label (np.array or Tensor): The instance internal - kernel label with size hxw. - min_kernel_area (int): The minimum kernel area. - kernel_num (int): The instance kernel number. - - Returns: - label (list): The instance index map with size hxw. 
- """ - assert isinstance(kernel_mask, (torch.Tensor, np.ndarray)) - assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray)) - assert isinstance(min_kernel_area, int) - assert isinstance(kernel_num, int) - - if isinstance(kernel_mask, np.ndarray): - kernel_mask = torch.from_numpy(kernel_mask) - if isinstance(internal_kernel_label, np.ndarray): - internal_kernel_label = torch.from_numpy(internal_kernel_label) - - if torch.__version__ == 'parrots': - if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0: - label = [] - else: - label = ext_module.contour_expand( - kernel_mask, - internal_kernel_label, - min_kernel_area=min_kernel_area, - kernel_num=kernel_num) - label = label.tolist() - else: - label = ext_module.contour_expand(kernel_mask, internal_kernel_label, - min_kernel_area, kernel_num) - return label diff --git a/spaces/giswqs/Streamlit/apps/xy.py b/spaces/giswqs/Streamlit/apps/xy.py deleted file mode 100644 index 1ca2cef25553a8b1d52c19db0aac6c3ca37a6858..0000000000000000000000000000000000000000 --- a/spaces/giswqs/Streamlit/apps/xy.py +++ /dev/null @@ -1,65 +0,0 @@ -import leafmap.foliumap as leafmap -import pandas as pd -import streamlit as st - - -def app(): - - st.title("Add Points from XY") - - sample_url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv" - url = st.text_input("Enter URL:", sample_url) - m = leafmap.Map(locate_control=True, plugin_LatLngPopup=False) - - if url: - - try: - df = pd.read_csv(url) - - columns = df.columns.values.tolist() - row1_col1, row1_col2, row1_col3, row1_col4, row1_col5 = st.columns( - [1, 1, 3, 1, 1] - ) - - lon_index = 0 - lat_index = 0 - - for col in columns: - if col.lower() in ["lon", "longitude", "long", "lng"]: - lon_index = columns.index(col) - elif col.lower() in ["lat", "latitude"]: - lat_index = columns.index(col) - - with row1_col1: - x = st.selectbox("Select longitude column", columns, lon_index) - - with row1_col2: - y = st.selectbox("Select latitude column", columns, lat_index) - - with row1_col3: - popups = st.multiselect("Select popup columns", columns, columns) - - with row1_col4: - heatmap = st.checkbox("Add heatmap") - - if heatmap: - with row1_col5: - if "pop_max" in columns: - index = columns.index("pop_max") - else: - index = 0 - heatmap_col = st.selectbox("Select heatmap column", columns, index) - try: - m.add_heatmap(df, y, x, heatmap_col) - except: - st.error("Please select a numeric column") - - try: - m.add_points_from_xy(df, x, y, popups) - except: - st.error("Please select a numeric column") - - except Exception as e: - st.error(e) - - m.to_streamlit() diff --git a/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md b/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md deleted file mode 100644 index 67e2b8fdeafaa7e2dbba855732bbf718893c07ca..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/BAHAN AJAR - Universitas Udayana[2].md +++ /dev/null @@ -1,8 +0,0 @@ -
      -

      Bismillah, I would like to ask a few questions.
      *Please help*
      1. Under what conditions would you use a completely randomized design in a study? *
      2. Under what conditions would you use a randomized block design in a study?
      3. Under what conditions would you use a factorial design in a study?
      4. Give a complete and clear description (covering the treatments, the randomization, and the research conditions/location) of a study that uses a completely randomized design (see the randomization sketch after these questions). *
      5. Give a complete and clear description (covering the treatments, the randomization, and the research conditions/location) of a study that uses a randomized block design. *
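
      The questions above center on when and how to randomize. As a minimal, hypothetical sketch of the randomization step in a completely randomized design (RAL), the Python snippet below assigns invented treatments to homogeneous experimental units; the treatment and unit names are made up purely for illustration.

      ```python
      # Minimal sketch of randomization in a completely randomized design (RAL).
      # Treatments and experimental units are invented for illustration only.
      import random

      treatments = ["fertilizer A", "fertilizer B", "fertilizer C", "control"]
      replicates_per_treatment = 3
      units = [f"pot {i + 1}" for i in range(len(treatments) * replicates_per_treatment)]

      # Complete randomization: every unit has an equal chance of receiving any
      # treatment, with no blocking or grouping of the units.
      assignments = treatments * replicates_per_treatment
      random.shuffle(assignments)

      for unit, treatment in zip(units, assignments):
          print(f"{unit} -> {treatment}")
      ```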

      -

      For example, as a simple illustration of an experimental study, a researcher wants to know how effective a cigarette-smoke alarm system in public spaces is at reducing smoking in public places. Some public spaces are fitted with the alarm system, while other spaces with the same characteristics are not.
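
      To make this concrete, a hedged sketch of how data from such a two-treatment completely randomized design might be analyzed is shown below, using a one-way ANOVA from SciPy; the observation counts are invented and not taken from any real study.

      ```python
      # Illustrative analysis of a completely randomized design with two treatments
      # (public spaces with and without the alarm system). All numbers are invented.
      from scipy import stats

      smokers_with_alarm = [12, 9, 11, 8, 10, 7]       # smokers observed per space
      smokers_without_alarm = [15, 18, 14, 17, 16, 19]

      f_stat, p_value = stats.f_oneway(smokers_with_alarm, smokers_without_alarm)
      print(f"F = {f_stat:.2f}, p = {p_value:.4f}")
      # A p-value below 0.05 would suggest the alarm system affects smoking in public spaces.
      ```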

      -

      contoh soal rancangan acak lengkap pdf to jpg


      Download ✸✸✸ https://urlgoal.com/2uyNCg



      -

      1. Inventarisasi Hutan

      • Petak Ukur
      • Inventore Hutan dengan sampling revisi
      • Invent Kuliah II
      • Sampling
      • Statistik Untuk Kehutanan
      2. Analisis Keanekaragaman Flora dan Fauna
      • Laporan pengamatan burung metode IPA
      • Perhitungan Metode IPA
      • Proyeksi Metode IPA
      3. Penyuluhan Kehutanan
      • Bab I
      • Bab II
      • Bab III
      • Bab IV
      • Bab V
      • Mengukur Partisipasi
      • Metode Penyuluhan Kehutanan
      • Proses persiapan penyuluhan
      4. Pemanenan Hasil Hutan
      • Muat Bongkar
      • Penyaradan
      • Penebangan
      • Pembagian batang
      • Perencanaan pembukaan wilayah hutan
      • Sistem pemanenan kayu di hutan rawa tropika Indonesia
      • Road contructions
      • RIL
      5. Perlindungan Hutan
      • Ilmu hama hutan
      • Pengendalian hama
      • Penyakit hutan
      • Gangguan hutan dari faktor abiotik
      6. Pengantar Konservasi Sumber Daya Hutan
      • Pengeertian dan sejarah konservasi
      • Konservasi pada tingkat spesies dan populasi
      • Bentuk kawasan dilindungi
      • HCFC
      • Overview
      7. Hutan Kota
      • Hutan Kota 1
      • Hutan Kota 2
      • Hutan Kota 3
      • Hutan Kota 4
      • Hutan kota 5
      • Outline
      • PP 63 th 2002 tentang Hutan kota
      • tujuan pembangunan kota
      • Urban Forest Planning
      8. Rancangan Percobaan
      • Contoh soal latihan rancob
      • Percobaan dua faktor
      • RAK
      • Rancangan acak kelompok
      • Rancangan acak lengkap
      • Rancob All dosen revisi
      9. Perencanaan Hutan
      • Kuliah 1 Pendahuluan
      • Kuliah 2 Unsur-unsur dan jenis perencanaan
      • Kuliah 3 Perencanaan pengelolaan hutan berbasis ekosistem
      • Kuliah 4 perencanaan Partisipatif
      • Penyelesaian masalah Multikriteria Kehutanan dengan software
      10. Biometrika Hutan
      • Ilmu ukur kayu
      • Penentuan volume sortimen
      • Pengukuran dimensi tegakan
      • Tabel tegakan
      11. Pengelolaan Daerah Aliran Sungai
      • Kuliah Das 1
      • Kuliah Das 2
      • Kuliah Das 3
      • Kuliah Das 4
      • Pengaruh perlakuan hutan terhadap kondisi hidrologi
      12. Klimatologi Pertanian
      • Ruang lingkup iklim
      • Siklus udara
      • Energi sumber penggerak iklim
      • Suhu udara
      • Evapotranspirasi dan Kelembaban
      • Neraca radiasi
      • Iklim tropis Indonesia, Sumatra, lampung
      • Proses pembentukan awan dan hujan
      • Iklim Global
      • Kelembaban udara
      13. Agroforestry
      • Bahan Ajar Agroforestry 1
      • Bahan Ajar Agroforestry 2
      • Bahan Ajar Agroforestry 3
      14. Bioteknologi Kehutanan
      • Definisi dan sejarah
      • Bioremediasi
      • Mikoriza
      • Endomikoriza
      • Pemanfaatan mikroorganisme
      • SNI
      15. Dasar-dasar Pengelolaan Kayu
      • Kayu lapis
      • Lect11 Particleboard
      • Lect12 Fiberboard
      • Lect13 Pulp and Paper
      16. Pengelolaan Hutan Rakyat
      • Acara 1
      • Acara 2
      • Acara 3
      • Acara 4
      • Keanekaragaman jenis burung air
      • Efektivitas penyerapan debu oleh daun
      18. Teknik Sampling
      • Kuliah 1
      • Cluster Sampling
      • Double Sampling
      • Sampling Bertingkat
      • Sistematik Sampling
      • Statified Ramdom Sampling
      19. Sistem Informasi Geografi
      • Kuliah 1
      • Pembangunan berkelanjutan
      20. Fisika Dasar
      • Bab 1 Penyusunan dan Penguraian Vektor

      -

      This study aimed to determine the effect of different concentrate qualities on the growth of body measurements, body weight, and feed intake of weaned female Friesian Holstein (FH) calves. The study used 12 weaned female FH calves with an average initial body weight of 84.25 ± 15.16 kg (CV = 18.62%), withers height of 90.56 ± 4.86 cm (CV = 5.73%), body length of 75.15 ± 5.43 cm (CV = 2.23%), chest girth of 104.98 ± 5.87 cm (CV = 5.59%), carpus bone length of 3.30 ± 0.61 cm (CV = 18.48%), and metacarpus bone length of 15.79 ± 1.91 cm (CV = 7.45%). The feed used in the study was elephant grass together with concentrate formulation I and concentrate formulation II, which differed in crude protein (CP) and total digestible nutrients (TDN). The parameters measured included body weight gain and body measurements, namely chest girth (LD), body length (PB), withers height (TP), carpus and metacarpus bone length, as well as feed intake. Sampling was done purposively based on calf age, which averaged 3.5 months. The experiment used a completely randomized design (RAL) with two treatments, each replicated six times. All data were analyzed statistically with a t-test at a 5% error probability. The study concludes that concentrate formulations I and II can serve as feed for weaned female FH calves and are suitable for raising replacement-cow candidates.
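
      A minimal sketch of the analysis described in the abstract (two concentrate formulations compared with a t-test at the 5% level) might look like the following; the daily weight-gain values and variable names are invented and are not taken from the study.

      ```python
      # Hypothetical comparison of two concentrate formulations with an
      # independent two-sample t-test, as described in the abstract above.
      from scipy import stats

      gain_formulation_1 = [0.52, 0.61, 0.58, 0.49, 0.63, 0.55]  # kg/day, six replicates
      gain_formulation_2 = [0.57, 0.66, 0.60, 0.54, 0.68, 0.59]  # kg/day, six replicates

      t_stat, p_value = stats.ttest_ind(gain_formulation_1, gain_formulation_2)
      print(f"t = {t_stat:.2f}, p = {p_value:.4f}")
      # p < 0.05 would indicate a significant difference between the formulations.
      ```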

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md b/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md deleted file mode 100644 index f39baf66ba2a93d608507f4fb38c847f5c53906d..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Download Jungle Ki Sherni full movie in hindi in 3gp The story of a tigress of the forests and her tribal allies.md +++ /dev/null @@ -1,6 +0,0 @@ - -


      -

      wapbold.com is a free online porn tube portal where you can watch and download many free porn movies and videos, updated daily. So watch and download your favourite mobile porn here at our wapbold porn site, and don't forget to bookmark us! See you at wapbold.com ;)

      -

      download Jungle Ki Sherni full movie in hindi in 3gp


      Download Filehttps://urlgoal.com/2uyN4P



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/gulabpatel/GFP_GAN/PaperModel.md b/spaces/gulabpatel/GFP_GAN/PaperModel.md deleted file mode 100644 index aec81d31de56df74c19ae840d44ad2b2a1f06d28..0000000000000000000000000000000000000000 --- a/spaces/gulabpatel/GFP_GAN/PaperModel.md +++ /dev/null @@ -1,76 +0,0 @@ -# Installation - -We now provide a *clean* version of GFPGAN, which does not require customized CUDA extensions. See [here](README.md#installation) for this easier installation.
      -If you want want to use the original model in our paper, please follow the instructions below. - -1. Clone repo - - ```bash - git clone https://github.com/xinntao/GFPGAN.git - cd GFPGAN - ``` - -1. Install dependent packages - - As StyleGAN2 uses customized PyTorch C++ extensions, you need to **compile them during installation** or **load them just-in-time(JIT)**. - You can refer to [BasicSR-INSTALL.md](https://github.com/xinntao/BasicSR/blob/master/INSTALL.md) for more details. - - **Option 1: Load extensions just-in-time(JIT)** (For those just want to do simple inferences, may have less issues) - - ```bash - # Install basicsr - https://github.com/xinntao/BasicSR - # We use BasicSR for both training and inference - pip install basicsr - - # Install facexlib - https://github.com/xinntao/facexlib - # We use face detection and face restoration helper in the facexlib package - pip install facexlib - - pip install -r requirements.txt - python setup.py develop - - # remember to set BASICSR_JIT=True before your running commands - ``` - - **Option 2: Compile extensions during installation** (For those need to train/inference for many times) - - ```bash - # Install basicsr - https://github.com/xinntao/BasicSR - # We use BasicSR for both training and inference - # Set BASICSR_EXT=True to compile the cuda extensions in the BasicSR - It may take several minutes to compile, please be patient - # Add -vvv for detailed log prints - BASICSR_EXT=True pip install basicsr -vvv - - # Install facexlib - https://github.com/xinntao/facexlib - # We use face detection and face restoration helper in the facexlib package - pip install facexlib - - pip install -r requirements.txt - python setup.py develop - ``` - -## :zap: Quick Inference - -Download pre-trained models: [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth) - -```bash -wget https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth -P experiments/pretrained_models -``` - -- Option 1: Load extensions just-in-time(JIT) - - ```bash - BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1 - - # for aligned images - BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned - ``` - -- Option 2: Have successfully compiled extensions during installation - - ```bash - python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1 - - # for aligned images - python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned - ``` diff --git a/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py b/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py deleted file mode 100644 index 2ee872937f8b5718aaf0faacb6f22e024ec55a87..0000000000000000000000000000000000000000 --- a/spaces/gundruke/ua-thesis-absa/models/layers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .CRF import CRF diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py 
b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py deleted file mode 100644 index 2d89224b64402badf7f0b113188b5f653df912ac..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/structures/image_list.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -from __future__ import division -from typing import Any, List, Sequence, Tuple, Union -import torch -from torch.nn import functional as F - - -class ImageList(object): - """ - Structure that holds a list of images (of possibly - varying sizes) as a single tensor. - This works by padding the images to the same size, - and storing in a field the original sizes of each image - - Attributes: - image_sizes (list[tuple[int, int]]): each tuple is (h, w) - """ - - def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): - """ - Arguments: - tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 - image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can - be smaller than (H, W) due to padding. - """ - self.tensor = tensor - self.image_sizes = image_sizes - - def __len__(self) -> int: - return len(self.image_sizes) - - def __getitem__(self, idx: Union[int, slice]) -> torch.Tensor: - """ - Access the individual image in its original size. - - Returns: - Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 - """ - size = self.image_sizes[idx] - return self.tensor[idx, ..., : size[0], : size[1]] # type: ignore - - def to(self, *args: Any, **kwargs: Any) -> "ImageList": - cast_tensor = self.tensor.to(*args, **kwargs) - return ImageList(cast_tensor, self.image_sizes) - - @property - def device(self) -> torch.device: - return self.tensor.device - - @staticmethod - def from_tensors( - tensors: Sequence[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 - ) -> "ImageList": - """ - Args: - tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or - (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded - to the same shape with `pad_value`. - size_divisibility (int): If `size_divisibility > 0`, add padding to ensure - the common height and width is divisible by `size_divisibility`. - This depends on the model and many models need a divisibility of 32. - pad_value (float): value to pad - - Returns: - an `ImageList`. - """ - assert len(tensors) > 0 - assert isinstance(tensors, (tuple, list)) - for t in tensors: - assert isinstance(t, torch.Tensor), type(t) - assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape - # per dimension maximum (H, W) or (C_1, ..., C_K, H, W) where K >= 1 among all tensors - max_size = ( - # In tracing mode, x.shape[i] is Tensor, and should not be converted - # to int: this will cause the traced graph to have hard-coded shapes. - # Instead we should make max_size a Tensor that depends on these tensors. 
- # Using torch.stack twice seems to be the best way to convert - # list[list[ScalarTensor]] to a Tensor - torch.stack( - [ - torch.stack([torch.as_tensor(dim) for dim in size]) - for size in [tuple(img.shape) for img in tensors] - ] - ) - .max(0) - .values - ) - - if size_divisibility > 0: - stride = size_divisibility - # the last two dims are H,W, both subject to divisibility requirement - max_size = torch.cat([max_size[:-2], (max_size[-2:] + (stride - 1)) // stride * stride]) - - image_sizes = [tuple(im.shape[-2:]) for im in tensors] - - if len(tensors) == 1: - # This seems slightly (2%) faster. - # TODO: check whether it's faster for multiple images as well - image_size = image_sizes[0] - padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] - if all(x == 0 for x in padding_size): # https://github.com/pytorch/pytorch/issues/31734 - batched_imgs = tensors[0].unsqueeze(0) - else: - padded = F.pad(tensors[0], padding_size, value=pad_value) - batched_imgs = padded.unsqueeze_(0) - else: - # max_size can be a tensor in tracing mode, therefore use tuple() - batch_shape = (len(tensors),) + tuple(max_size) - batched_imgs = tensors[0].new_full(batch_shape, pad_value) - for img, pad_img in zip(tensors, batched_imgs): - pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) - - return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md deleted file mode 100644 index b9d5b15512c0bd160accbb1823236b8954a37b86..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/README.md +++ /dev/null @@ -1,9 +0,0 @@ - -This directory contains: - -1. A script that converts a detectron2 model to caffe2 format. - -2. An example that loads a Mask R-CNN model in caffe2 format and runs inference. - -See [tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html) -for their usage. 
diff --git a/spaces/hf-audio/open_asr_leaderboard/init.py b/spaces/hf-audio/open_asr_leaderboard/init.py deleted file mode 100644 index 2e24a0345a30dd3eb1a79616f81a65efa36a1956..0000000000000000000000000000000000000000 --- a/spaces/hf-audio/open_asr_leaderboard/init.py +++ /dev/null @@ -1,93 +0,0 @@ -import os -from constants import EVAL_REQUESTS_PATH -from pathlib import Path -from huggingface_hub import HfApi, Repository - -TOKEN_HUB = os.environ.get("TOKEN_HUB", None) -QUEUE_REPO = os.environ.get("QUEUE_REPO", None) -QUEUE_PATH = os.environ.get("QUEUE_PATH", None) - -hf_api = HfApi( - endpoint="https://huggingface.co", - token=TOKEN_HUB, -) - -def load_all_info_from_dataset_hub(): - eval_queue_repo = None - results_csv_path = None - requested_models = None - - passed = True - if TOKEN_HUB is None: - passed = False - else: - print("Pulling evaluation requests and results.") - - eval_queue_repo = Repository( - local_dir=QUEUE_PATH, - clone_from=QUEUE_REPO, - use_auth_token=TOKEN_HUB, - repo_type="dataset", - ) - eval_queue_repo.git_pull() - - # Local directory where dataset repo is cloned + folder with eval requests - directory = QUEUE_PATH / EVAL_REQUESTS_PATH - requested_models = get_all_requested_models(directory) - requested_models = [p.stem for p in requested_models] - # Local directory where dataset repo is cloned - csv_results = get_csv_with_results(QUEUE_PATH) - if csv_results is None: - passed = False - if not passed: - print("No HuggingFace token provided. Skipping evaluation requests and results.") - - return eval_queue_repo, requested_models, csv_results - - -def upload_file(requested_model_name, path_or_fileobj): - dest_repo_file = Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name - dest_repo_file = str(dest_repo_file) - hf_api.upload_file( - path_or_fileobj=path_or_fileobj, - path_in_repo=str(dest_repo_file), - repo_id=QUEUE_REPO, - token=TOKEN_HUB, - repo_type="dataset", - commit_message=f"Add {requested_model_name} to eval queue") - -def get_all_requested_models(directory): - directory = Path(directory) - all_requested_models = list(directory.glob("*.txt")) - return all_requested_models - -def get_csv_with_results(directory): - directory = Path(directory) - all_csv_files = list(directory.glob("*.csv")) - latest = [f for f in all_csv_files if f.stem.endswith("latest")] - if len(latest) != 1: - return None - return latest[0] - - - -def is_model_on_hub(model_name, revision="main") -> bool: - try: - model_name = model_name.replace(" ","") - author = model_name.split("/")[0] - model_id = model_name.split("/")[1] - if len(author) == 0 or len(model_id) == 0: - return False, "is not a valid model name. Please use the format `author/model_name`." - except Exception as e: - return False, "is not a valid model name. Please use the format `author/model_name`." - - try: - models = list(hf_api.list_models(author=author, search=model_id)) - matched = [model_name for m in models if m.modelId == model_name] - if len(matched) != 1: - return False, "was not found on the hub!" - else: - return True, None - except Exception as e: - print(f"Could not get the model from the hub.: {e}") - return False, "was not found on hub!" 
\ No newline at end of file diff --git a/spaces/hhalim/hadi_first_day_in_HF/README.md b/spaces/hhalim/hadi_first_day_in_HF/README.md deleted file mode 100644 index f71c508603356006ad9676a0602d35969180425e..0000000000000000000000000000000000000000 --- a/spaces/hhalim/hadi_first_day_in_HF/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hadi First Day In HF -emoji: 🐠 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hra/Curriculum-BabyAGI/app.py b/spaces/hra/Curriculum-BabyAGI/app.py deleted file mode 100644 index 74214146d6f8eb8d4e945c180f1e5d5280afef6b..0000000000000000000000000000000000000000 --- a/spaces/hra/Curriculum-BabyAGI/app.py +++ /dev/null @@ -1,116 +0,0 @@ -import json -import openai -import os - -import pandas as pd - -import gradio as gr -from collections import deque -from typing import Dict, List, Optional, Any - -from langchain import LLMChain, OpenAI, PromptTemplate -import datetime -from datetime import datetime, date, time, timedelta -from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, Document, ServiceContext -from langchain.llms import OpenAIChat - -import feedparser -import pandas as pd -import numpy as np - -from duckduckgo_search import ddg_videos -from duckduckgo_search import ddg - -def get_learning_curriculum(openapikey,topic): - dateforfilesave=datetime.today().strftime("%d-%m-%Y %I:%M%p") - print(topic) - print(dateforfilesave) - if openapikey=='': - return pd.DataFrame(["Please provide OpenAPI Key"],columns=['ERROR']) - - os.environ['OPENAI_API_KEY'] = str(openapikey) - - - ###Task Creation Agent - - prompt='You are a training center AI. Give me a detailed curriculum to learn about "{topicforquery}" using search. The curriculum will be a series of learning tasks to be achieved. Give output as a python list of jsons with "task name", "search keyword" to search to complete the task. Donot repeat the taks. For each task name also add a list of "questions" to ask the search results data to select specific articles and complete the curriculum. Remember the search list will be a dataframe of titles & body of the searched article and you may not be able to go through the full article hence these questions should be of types "Which article best suits a learning curriculum?", "Which article is learning oriented?. 
To reiterate output should be in json with keys task name ex: get beginner training articles for painting, search keyword ex: beginner painting & questions ex: What are top articles for painting?'.format(topicforquery=topic) - openai.api_key = os.getenv("OPENAI_API_KEY") - resp=openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "user", "content": prompt} - ] - ) - tasklist=json.loads(resp['choices'][0]['message']['content']) - - ###Function to search the internet using DuckDuckGo exposed as a tool - def research_search(search_keyword,question_to_ask,topic): - llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo")) - service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor) - keyword=search_keyword - keyword="+".join(keyword.lower().split()) - keyword=keyword.replace(' and ',' AND ') - posts = ddg(keyword+' '+topic, safesearch='Off', page=1) - latestnews_df=pd.DataFrame(posts) - print(latestnews_df.columns) - #latestnews_df=latestnews_df.drop_duplicates(subset=['title','link','published']) - latestnews_df['text']='Title: '+latestnews_df['title']+' Description: '+latestnews_df['body'] - print(latestnews_df['text'].tolist()) - documents=[Document(t) for t in latestnews_df['text'].tolist()] - index = GPTSimpleVectorIndex.from_documents(documents) - prompt_query=question_to_ask - respstr=str(index.query(prompt_query, - service_context=service_context, - response_mode="tree_summarize", - similarity_top_k=10)) - print("Search response: ",respstr) - return respstr - - ###Task Execution Agent loop - list1=[] - list2=[] - list3=[] - for i in range(len(tasklist)): - taskstuff=tasklist[i] - search_keyword=taskstuff['search keyword'] - for question in taskstuff['questions']: - response_string=research_search(search_keyword,question,topic) - list1.append(taskstuff['task name']) - list2.append(question) - list3.append(response_string) - - ###Create dataframe to display - outputdf=pd.DataFrame() - outputdf['Task']=list1 - outputdf['Question']=list2 - outputdf['Learning']=list3 - - return outputdf - -with gr.Blocks() as demo: - gr.Markdown("

      BabyAGI creates Learning Curriculum

      ") - gr.Markdown( - """ This is the first step of an experiment using BabyAGI as a "framework" to construct focused use cases (ex: learning curriculums). The flow uses two AI agents 1) Task creation agent: to create a task list & questions 2) Task execution agent: to execute the tasks & find answers to the questions. Unlike the original BabyAGI concept, this is not open-ended. \n\nNote: This is a series of experiments to understand AI agents and hence do check the quality of output. OpenAI agents (gpt-3.5-turbo), llama-index & DuckDuckGo search are used. The analysis takes roughly 120 secs & may not always be consistent. An error occurs when the OpenAI Api key is not provided/ ChatGPT API is overloaded/ ChatGPT is unable to correctly decipher & format the output\n\n Future directions: 1) Make the task creation more open-ended or longer. 2) Discover multiple learning paths and make ChatGPT introspect on them before finalizing the optimal one 3)Learn from the answers and change the curriculum![visitors](https://visitor-badge.glitch.me/badge?page_id=hra/Curriculum-BabyAGI)""" - ) - - with gr.Row() as row: - with gr.Column(): - textboxtopic = gr.Textbox(placeholder="Enter Topic for Curriculum...", lines=1,label='Topic') - with gr.Column(): - textboxopenapi = gr.Textbox(placeholder="Enter OpenAPI Key...", lines=1,label='OpenAPI Key') - with gr.Row() as row: - examples = gr.Examples(examples=['Acrylic painting','Generative AI','latest NLP topic models','FIFA mobile game','Telemedicine'], - inputs=[textboxtopic]) - with gr.Row() as row: - btn = gr.Button("Generate \nCurriculum") - - with gr.Row() as row: - table1=gr.Dataframe( - #headers=["Item", "Cost"], - #datatype=["str", "str","str"], - label="Learning Curriculum", - ) - - btn.click(get_learning_curriculum, inputs=[textboxopenapi,textboxtopic],outputs=[table1]) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/hysts/zeroscope-v2/README.md b/spaces/hysts/zeroscope-v2/README.md deleted file mode 100644 index 803a683fa3488ffd9086df37d9f8237e98777b9d..0000000000000000000000000000000000000000 --- a/spaces/hysts/zeroscope-v2/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Zeroscope V2 -emoji: 🌖 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.45.2 -app_file: app.py -pinned: false -license: mit -suggested_hardware: t4-small ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/idlsono/Idksono4/README.md b/spaces/idlsono/Idksono4/README.md deleted file mode 100644 index 484f63344c33755efc6687ddcb752305dc60f3f7..0000000000000000000000000000000000000000 --- a/spaces/idlsono/Idksono4/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Lolcats39 -emoji: 🌍 -colorFrom: yellow -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/imseldrith/BotX/Uploader/config.py b/spaces/imseldrith/BotX/Uploader/config.py deleted file mode 100644 index e97d8fce7dc3879ab97081ed271023b9420ad602..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/BotX/Uploader/config.py +++ /dev/null @@ -1,72 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Hash Minner - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE - -import os - -import logging - -logging.basicConfig( - format='%(name)s - %(levelname)s - %(message)s', - handlers=[logging.FileHandler('log.txt'), - logging.StreamHandler()], - level=logging.INFO -) - - -class Config(object): - WEBHOOK = os.environ.get("BOT_TOKEN", False) - # Get a token from @BotFather - BOT_TOKEN = os.environ.get("BOT_TOKEN", "") - # The Telegram API things - API_ID = int(os.environ.get("API_ID", 12345)) - API_HASH = os.environ.get("API_HASH") - # Get these values from my.telegram.org - # Array to store users who are authorized to use the bot - - # File /video download location - DOWNLOAD_LOCATION = "./DOWNLOADS" - - MEGA_EMAIL = os.environ.get("MEGA_EMAIL", "None") - # If deploying on vps edit the above value as example := Mega_email = "Your-Mega_email-inside-inverted-commas." - - # This is not necessary! Enter your mega password only if you have a mega.nz account with pro/business features. - MEGA_PASSWORD = os.environ.get("MEGA_PASSWORD", "None") - # If deploying on vps edit the above value as example := Mega_password = "Your-Mega_password-inside-inverted-commas." - # Telegram maximum file upload size - TG_MAX_FILE_SIZE = 4194304000 - - # Chunk size that should be used with requests - CHUNK_SIZE = int(os.environ.get("CHUNK_SIZE", 128)) - # Proxy for accessing youtube-dl in GeoRestricted Areas - # Get your own proxy from https://github.com/rg3/youtube-dl/issues/1091#issuecomment-230163061 - HTTP_PROXY = os.environ.get("HTTP_PROXY", "") - - # Set timeout for subprcess - PROCESS_MAX_TIMEOUT = 3700 - - LOG_CHANNEL = int(os.environ.get("LOG_CHANNEL", -100)) - OWNER_ID = int(os.environ.get("OWNER_ID", "12356")) - BOT_USERNAME = os.environ.get("BOT_USERNAME", "") - ADL_BOT_RQ = {} - AUTH_USERS = list({int(x) - for x in os.environ.get("AUTH_USERS", "0").split()}) - AUTH_USERS.append(OWNER_ID) diff --git a/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md b/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md deleted file mode 100644 index dbae6f903e7eb3c62cb40762a0eebd4f90ddc89e..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Californication S06e01 Hdtv X264 Evolve English Subtitles Free Dvd Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Californication S06e01 Hdtv X264 Evolve English Subtitles : Free Dvd


      Download ->->->-> https://gohhs.com/2uz4xY



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md b/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md deleted file mode 100644 index 5491205c9b24c2ec4ea7fa3bda21afac80478bc9..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/7 Data Recovery Crack Serial Key.md +++ /dev/null @@ -1,6 +0,0 @@ -

      7 Data Recovery Crack Serial Key


      Download File ✒ ✒ ✒ https://urlin.us/2uEwO3



      -
      - 4fefd39f24
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md deleted file mode 100644 index e1fbcb942a7e55392aeabae31da038aaa4d0f625..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack _VERIFIED_ed Working Digital Anarchy Backdrop Designer V1 2 2 For Photoshop Rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Cracked Working Digital Anarchy Backdrop Designer v1 2 2 for Photoshop rar


      Download ->->->-> https://urlin.us/2uEvOs



      -
      -Oct 19, 2020 · Digital Anarchy has released Flicker Free 2. ... in this section is stacked with pro video design features, with great visuals to choose from, ... Roll - Creative COW's user support and discussion forum for users of Adobe After Effects. rar. ... 3 CE for After Effects and Premiere Pro + Crack Anarchy Flicker Free 1. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md deleted file mode 100644 index 2cab3dc3e81a17344f822c68505f24ab4eded080..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Mere Brother Ki Dulhan 3 1080p Full Movie Download PATCHED.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Mere Brother Ki Dulhan 3 1080p full movie download


      Download Ziphttps://urlin.us/2uEvN8



      - -Mere Brother Ki Dulhan. Saved by Anja India. 2. 2011 MoviesHd MoviesMovie TvMp3 Song DownloadFull Movies DownloadMere Brother Ki DulhanLatest ... Housefull 3 Movie is an Indien movie. ... Hetal PanchalFull movies download ... Read reviews and buy xXx: Return Of Xander Cage (Blu-ray+ DVD + Digital) at Target. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md deleted file mode 100644 index a33a4bd7cab7d4594fb92617025f700cee8ca08b..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Moyea Swf To Video Converter Pro 4.0.0.1 Cracked.md +++ /dev/null @@ -1,6 +0,0 @@ -

      moyea swf to video converter pro 4.0.0.1 cracked


      DOWNLOAD ⇒⇒⇒ https://urlin.us/2uEweX



      -
      -Listen to Moyea Swf To Video Converter Pro 4001 Crack and thirty-nine more episodes by Assignment 3.pdf, free! No signup or install.... Flv 2 video converter ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md b/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md deleted file mode 100644 index 1124a0d58fd14f1f47096bcde3b219a280f1eb5e..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/AutoCAD Civil 3D 2020.1 Crack With Product Key Full [PORTABLE] Free Download.md +++ /dev/null @@ -1,7 +0,0 @@ - -

      AutoCAD 2020 Crack has many other features, such as snapline, dynamic views, deskew, fillet, measure, wall cutting and more. It also includes a brand-new toolset for exporting drawings. For the first time, the AutoCAD Civil 3D toolset lets you save a new drawing from a template: template-based drawing creation saves time by letting you create a drawing and specify its template with just a few clicks of the mouse.

      -

      AutoCAD Civil 3D 2020.1 Crack With Product Key Full Free Download


      Download Ziphttps://tiurll.com/2uCiX0



      -

      AutoCAD Civil 3D 2020.1 Crack offers many other features, such as 2D and 3D graphics, GIS, measure and solid modeling, along with further improvements included in this release. You can work collaboratively with your colleagues and customers, share data with them, and discuss your projects. You can make changes in the same drawing while working on a version, and you can open and close multiple drawings simultaneously.

      -

      AutoCAD Crack also lets you create a presentation in a new format called the streamlined presentation, designed to make building presentations easy. You can share content with other users and with clients, open multiple drawings at once, and organize your drawings and models. AutoCAD Civil 3D 2020.1 Crack also provides a new way to print from AutoCAD Civil 3D to a network printer, using either the network printer or the WinPrinter in Windows; the print settings are stored as a filter for that printer, and once the filter is applied you can print from AutoCAD Civil 3D directly to the network printer or WinPrinter.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md b/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md deleted file mode 100644 index 52173559576554396a0fc209375eea7fb8095f9f..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Bhugol Aur Aap Pdf Download REPACK.md +++ /dev/null @@ -1,10 +0,0 @@ -

      bhugol aur aap pdf download


      Download File →→→ https://tiurll.com/2uCiIv



      -
      -July 4, 2020 - "Geography" and "You", commonly known as G'nY (in English) and its counterpart Bhugol Aur Aap ( in Hindi), published twice a month. magazines that ... read more -July 3, 2020 - Aaj Kal (in English) and Bhugol Aur Aap (in Hindi) are released every month, which means you can enjoy two magazines at the same time. -If you want to see everything that goes into our magazines and see how our ... read more -June 24, 2020 - This month we will show you four new magazines and will continue to show you more every month. -We will only show you four magazines and the rest will be stored in ... read more 8a78ff9644
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md b/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md deleted file mode 100644 index cb7e9e7cb50c7be0b2b0a0863d1106cd76a208f0..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] UPD.md +++ /dev/null @@ -1,17 +0,0 @@ - -

      How to Watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]

      -

      Brooklyn Nine-Nine is a hilarious comedy series that follows the lives of an eclectic group of detectives in a New York precinct. The show has won several awards, including two Golden Globes and four Emmys. If you are a fan of Brooklyn Nine-Nine, you might be wondering how to watch the first two seasons of the show in high quality and with subtitles.

      -

      In this article, we will show you how to watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv], which is a torrent file that contains all the episodes of the first two seasons in 1080p resolution and with subtitles in various languages. We will also explain what rarbg and rartv are, and why they are popular among torrent users.

      -

      Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]


      DOWNLOADhttps://tiurll.com/2uCliL



      -

      What are rarbg and rartv?

      -

      rarbg and rartv are two of the most popular torrent sites on the internet. They offer a wide range of movies, TV shows, games, music, and software for free download. They are known for their high-quality releases, fast download speeds, and reliable seeding. They also provide subtitles for many of their releases, which is very convenient for non-English speakers or hearing-impaired viewers.

      -

      rarbg and rartv are not official sources of Brooklyn Nine-Nine, and they do not have any affiliation with the creators or distributors of the show. They are simply providing a service for people who want to watch the show without paying for it or waiting for it to be available on streaming platforms. However, downloading or streaming content from torrent sites may be illegal in some countries, so you should be careful and use a VPN (virtual private network) to protect your privacy and security.

      -

      How to watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv]?

      -

      To watch Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv], you will need a torrent client, such as uTorrent or BitTorrent, which will allow you to download the torrent file and connect to other peers who have the same file. You can find the torrent file by searching for it on rarbg or rartv's websites, or by using a torrent search engine like Torrentz2 or 1337x.

      -

      Once you have downloaded the torrent file, you can open it with your torrent client and start downloading the episodes. The download speed will depend on your internet connection and the number of seeders (people who have the complete file and are sharing it). The more seeders there are, the faster the download will be.

      -

      After you have downloaded all the episodes, you can watch them on your computer or transfer them to your TV or mobile device. You can use any media player that supports MKV files, such as VLC or MPC-HC. The subtitles should be embedded in the video files, so you can choose your preferred language from the menu. If not, you can download separate subtitle files from opensubtitles.com[^1^] or subtitlevid.com[^2^] and load them manually.

      -

      -

      Conclusion

      -

      Brooklyn Nine-Nine Season 1-2 Complete 1080p Plus Subtitles [rarbg][rartv] is a great way to enjoy one of the best comedy shows of recent years in high quality and with subtitles. However, you should be aware of the legal and ethical implications of downloading or streaming content from torrent sites, and use a VPN to protect yourself from potential risks. Alternatively, you can watch Brooklyn Nine-Nine legally on streaming platforms like Netflix or Hulu, or buy the DVDs or Blu-rays from official sources.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md b/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md deleted file mode 100644 index 3666c04df9e84d8f96e743091a0714d19b13b346..0000000000000000000000000000000000000000 --- a/spaces/izumi-lab/llama-13b-japanese-lora-v0-1ep/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: LLaMA 13B Japanese LoRA v0 1 epoch -emoji: 🐨 -colorFrom: gray -colorTo: gray -sdk: docker -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py b/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py deleted file mode 100644 index 168f9979a4623806934b0ff1102ac166704e7dec..0000000000000000000000000000000000000000 --- a/spaces/jackyccl/segment-anything/groundingdino/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/spaces/jeffrymahbuubi/foodvision-mini/app.py b/spaces/jeffrymahbuubi/foodvision-mini/app.py deleted file mode 100644 index 4cd715f723f6f42cc3c86fd54d25df129d61bc3f..0000000000000000000000000000000000000000 --- a/spaces/jeffrymahbuubi/foodvision-mini/app.py +++ /dev/null @@ -1,75 +0,0 @@ - -### 1. Imports and class names setup ### -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -class_names = ['pizza', 'steak', 'sushi'] - -### 2. Model and transforms prepration ### -effnetb2, effnetb2_tranforms = create_effnetb2_model( - num_classes=3 -) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f"09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth", - map_location=torch.device('cpu') - ) -) - -### 3. Predict function ### -def predict(img) -> Tuple[Dict, float]: - """ - Transforms and performs a prediction on img and returns prediction and time taken. - """ - - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_tranforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability for each prediction class - pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. Gradio app ### -# Create title, description and article strings -title = "FoodVision Mini 🍕🥩🍣" -description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi." -article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)." - -# Create examples list from "examples/" directory -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create the Gradio demo -demo = gr.Interface(fn=predict, # mapping function from input to output - inputs=gr.Image(type="pil"), # what are the inputs? 
- outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs? - gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs - # Create examples list from "examples/" directory - examples=example_list, - title=title, - description=description, - article=article) - -# Launch the demo! -demo.launch() diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py deleted file mode 100644 index b5f19b747f2457902695213f7efcde4fdc306c1f..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/saicinpainting/evaluation/losses/lpips.py +++ /dev/null @@ -1,891 +0,0 @@ -############################################################ -# The contents below have been combined using files in the # -# following repository: # -# https://github.com/richzhang/PerceptualSimilarity # -############################################################ - -############################################################ -# __init__.py # -############################################################ - -import numpy as np -from skimage.metrics import structural_similarity -import torch - -from saicinpainting.utils import get_shape - - -class PerceptualLoss(torch.nn.Module): - def __init__(self, model='net-lin', net='alex', colorspace='rgb', model_path=None, spatial=False, use_gpu=True): - # VGG using our perceptually-learned weights (LPIPS metric) - # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss - super(PerceptualLoss, self).__init__() - self.use_gpu = use_gpu - self.spatial = spatial - self.model = DistModel() - self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, - model_path=model_path, spatial=self.spatial) - - def forward(self, pred, target, normalize=True): - """ - Pred and target are Variables. - If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] - If normalize is False, assumes the images are already between [-1,+1] - Inputs pred and target are Nx3xHxW - Output pytorch Variable N long - """ - - if normalize: - target = 2 * target - 1 - pred = 2 * pred - 1 - - return self.model(target, pred) - - -def normalize_tensor(in_feat, eps=1e-10): - norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True)) - return in_feat / (norm_factor + eps) - - -def l2(p0, p1, range=255.): - return .5 * np.mean((p0 / range - p1 / range) ** 2) - - -def psnr(p0, p1, peak=255.): - return 10 * np.log10(peak ** 2 / np.mean((1. * p0 - 1. * p1) ** 2)) - - -def dssim(p0, p1, range=255.): - return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. 
- - -def rgb2lab(in_img, mean_cent=False): - from skimage import color - img_lab = color.rgb2lab(in_img) - if (mean_cent): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - return img_lab - - -def tensor2np(tensor_obj): - # change dimension of a tensor object into a numpy array - return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0)) - - -def np2tensor(np_obj): - # change dimenion of np array into tensor array - return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False): - # image tensor to lab tensor - from skimage import color - - img = tensor2im(image_tensor) - img_lab = color.rgb2lab(img) - if (mc_only): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - if (to_norm and not mc_only): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - img_lab = img_lab / 100. - - return np2tensor(img_lab) - - -def tensorlab2tensor(lab_tensor, return_inbnd=False): - from skimage import color - import warnings - warnings.filterwarnings("ignore") - - lab = tensor2np(lab_tensor) * 100. - lab[:, :, 0] = lab[:, :, 0] + 50 - - rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1) - if (return_inbnd): - # convert back to lab, see if we match - lab_back = color.rgb2lab(rgb_back.astype('uint8')) - mask = 1. * np.isclose(lab_back, lab, atol=2.) - mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis]) - return (im2tensor(rgb_back), mask) - else: - return im2tensor(rgb_back) - - -def rgb2lab(input): - from skimage import color - return color.rgb2lab(input / 255.) - - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -def tensor2vec(vector_tensor): - return vector_tensor.data.cpu().numpy()[:, :, 0, 0] - - -def voc_ap(rec, prec, use_07_metric=False): - """ ap = voc_ap(rec, prec, [use_07_metric]) - Compute VOC AP given precision and recall. - If use_07_metric is true, uses the - VOC 07 11 point method (default:False). - """ - if use_07_metric: - # 11 point metric - ap = 0. - for t in np.arange(0., 1.1, 0.1): - if np.sum(rec >= t) == 0: - p = 0 - else: - p = np.max(prec[rec >= t]) - ap = ap + p / 11. - else: - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): - # def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255. 
/ 2.): - # def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -############################################################ -# base_model.py # -############################################################ - - -class BaseModel(torch.nn.Module): - def __init__(self): - super().__init__() - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True): - self.use_gpu = use_gpu - - def forward(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s' % save_path) - network.load_state_dict(torch.load(save_path, map_location='cpu')) - - def update_learning_rate(): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'), flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i') - - -############################################################ -# dist_model.py # -############################################################ - -import os -from collections import OrderedDict -from scipy.ndimage import zoom -from tqdm import tqdm - - -class DistModel(BaseModel): - def name(self): - return self.model_name - - def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, - model_path=None, - use_gpu=True, printNet=False, spatial=False, - is_train=False, lr=.0001, beta1=0.5, version='0.1'): - ''' - INPUTS - model - ['net-lin'] for linearly calibrated network - ['net'] for off-the-shelf network - ['L2'] for L2 distance in Lab colorspace - ['SSIM'] for ssim in RGB colorspace - net - ['squeeze','alex','vgg'] - model_path - if None, will look in weights/[NET_NAME].pth - colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM - use_gpu - bool - whether or not to use a GPU - printNet - bool - whether or not to print network architecture out - spatial - bool - whether to output an array containing varying distances across spatial dimensions - spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). - spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. - spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). 
- is_train - bool - [True] for training mode - lr - float - initial learning rate - beta1 - float - initial momentum term for adam - version - 0.1 for latest, 0.0 was original (with a bug) - ''' - BaseModel.initialize(self, use_gpu=use_gpu) - - self.model = model - self.net = net - self.is_train = is_train - self.spatial = spatial - self.model_name = '%s [%s]' % (model, net) - - if (self.model == 'net-lin'): # pretrained net + linear layer - self.net = PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, - use_dropout=True, spatial=spatial, version=version, lpips=True) - kw = dict(map_location='cpu') - if (model_path is None): - import inspect - model_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'lpips_models', f'{net}.pth')) - - if (not is_train): - self.net.load_state_dict(torch.load(model_path, **kw), strict=False) - - elif (self.model == 'net'): # pretrained network - self.net = PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) - elif (self.model in ['L2', 'l2']): - self.net = L2(use_gpu=use_gpu, colorspace=colorspace) # not really a network, only for testing - self.model_name = 'L2' - elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']): - self.net = DSSIM(use_gpu=use_gpu, colorspace=colorspace) - self.model_name = 'SSIM' - else: - raise ValueError("Model [%s] not recognized." % self.model) - - self.trainable_parameters = list(self.net.parameters()) - - if self.is_train: # training mode - # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) - self.rankLoss = BCERankingLoss() - self.trainable_parameters += list(self.rankLoss.net.parameters()) - self.lr = lr - self.old_lr = lr - self.optimizer_net = torch.optim.Adam(self.trainable_parameters, lr=lr, betas=(beta1, 0.999)) - else: # test mode - self.net.eval() - - # if (use_gpu): - # self.net.to(gpu_ids[0]) - # self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) - # if (self.is_train): - # self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 - - if (printNet): - print('---------- Networks initialized -------------') - print_network(self.net) - print('-----------------------------------------------') - - def forward(self, in0, in1, retPerLayer=False): - ''' Function computes the distance between image patches in0 and in1 - INPUTS - in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] - OUTPUT - computed distances between in0 and in1 - ''' - - return self.net(in0, in1, retPerLayer=retPerLayer) - - # ***** TRAINING FUNCTIONS ***** - def optimize_parameters(self): - self.forward_train() - self.optimizer_net.zero_grad() - self.backward_train() - self.optimizer_net.step() - self.clamp_weights() - - def clamp_weights(self): - for module in self.net.modules(): - if (hasattr(module, 'weight') and module.kernel_size == (1, 1)): - module.weight.data = torch.clamp(module.weight.data, min=0) - - def set_input(self, data): - self.input_ref = data['ref'] - self.input_p0 = data['p0'] - self.input_p1 = data['p1'] - self.input_judge = data['judge'] - - # if (self.use_gpu): - # self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) - # self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) - # self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) - # self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) - - # self.var_ref = Variable(self.input_ref, requires_grad=True) - # self.var_p0 = Variable(self.input_p0, requires_grad=True) - # self.var_p1 = Variable(self.input_p1, 
requires_grad=True) - - def forward_train(self): # run forward pass - # print(self.net.module.scaling_layer.shift) - # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) - - assert False, "We shoud've not get here when using LPIPS as a metric" - - self.d0 = self(self.var_ref, self.var_p0) - self.d1 = self(self.var_ref, self.var_p1) - self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge) - - self.var_judge = Variable(1. * self.input_judge).view(self.d0.size()) - - self.loss_total = self.rankLoss(self.d0, self.d1, self.var_judge * 2. - 1.) - - return self.loss_total - - def backward_train(self): - torch.mean(self.loss_total).backward() - - def compute_accuracy(self, d0, d1, judge): - ''' d0, d1 are Variables, judge is a Tensor ''' - d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten() - judge_per = judge.cpu().numpy().flatten() - return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) - - def get_current_errors(self): - retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()), - ('acc_r', self.acc_r)]) - - for key in retDict.keys(): - retDict[key] = np.mean(retDict[key]) - - return retDict - - def get_current_visuals(self): - zoom_factor = 256 / self.var_ref.data.size()[2] - - ref_img = tensor2im(self.var_ref.data) - p0_img = tensor2im(self.var_p0.data) - p1_img = tensor2im(self.var_p1.data) - - ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0) - p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0) - p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0) - - return OrderedDict([('ref', ref_img_vis), - ('p0', p0_img_vis), - ('p1', p1_img_vis)]) - - def save(self, path, label): - if (self.use_gpu): - self.save_network(self.net.module, path, '', label) - else: - self.save_network(self.net, path, '', label) - self.save_network(self.rankLoss.net, path, 'rank', label) - - def update_learning_rate(self, nepoch_decay): - lrd = self.lr / nepoch_decay - lr = self.old_lr - lrd - - for param_group in self.optimizer_net.param_groups: - param_group['lr'] = lr - - print('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr)) - self.old_lr = lr - - -def score_2afc_dataset(data_loader, func, name=''): - ''' Function computes Two Alternative Forced Choice (2AFC) score using - distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return numpy array of length N - OUTPUTS - [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators - [1] - dictionary with following elements - d0s,d1s - N arrays containing distances between reference patch to perturbed patches - gts - N array in [0,1], preferred patch selected by human evaluators - (closer to "0" for left patch p0, "1" for right patch p1, - "0.6" means 60pct people preferred right patch, 40pct preferred left) - scores - N array in [0,1], corresponding to what percentage function agreed with humans - CONSTS - N - number of test triplets in data_loader - ''' - - d0s = [] - d1s = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist() - d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist() - gts += data['judge'].cpu().numpy().flatten().tolist() - - d0s = np.array(d0s) - d1s = 
np.array(d1s) - gts = np.array(gts) - scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5 - - return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores)) - - -def score_jnd_dataset(data_loader, func, name=''): - ''' Function computes JND score using distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return pytorch array of length N - OUTPUTS - [0] - JND score in [0,1], mAP score (area under precision-recall curve) - [1] - dictionary with following elements - ds - N array containing distances between two patches shown to human evaluator - sames - N array containing fraction of people who thought the two patches were identical - CONSTS - N - number of test triplets in data_loader - ''' - - ds = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist() - gts += data['same'].cpu().numpy().flatten().tolist() - - sames = np.array(gts) - ds = np.array(ds) - - sorted_inds = np.argsort(ds) - ds_sorted = ds[sorted_inds] - sames_sorted = sames[sorted_inds] - - TPs = np.cumsum(sames_sorted) - FPs = np.cumsum(1 - sames_sorted) - FNs = np.sum(sames_sorted) - TPs - - precs = TPs / (TPs + FPs) - recs = TPs / (TPs + FNs) - score = voc_ap(recs, precs) - - return (score, dict(ds=ds, sames=sames)) - - -############################################################ -# networks_basic.py # -############################################################ - -import torch.nn as nn -from torch.autograd import Variable -import numpy as np - - -def spatial_average(in_tens, keepdim=True): - return in_tens.mean([2, 3], keepdim=keepdim) - - -def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W - in_H = in_tens.shape[2] - scale_factor = 1. 
* out_H / in_H - - return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) - - -# Learned perceptual metric -class PNetLin(nn.Module): - def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, - version='0.1', lpips=True): - super(PNetLin, self).__init__() - - self.pnet_type = pnet_type - self.pnet_tune = pnet_tune - self.pnet_rand = pnet_rand - self.spatial = spatial - self.lpips = lpips - self.version = version - self.scaling_layer = ScalingLayer() - - if (self.pnet_type in ['vgg', 'vgg16']): - net_type = vgg16 - self.chns = [64, 128, 256, 512, 512] - elif (self.pnet_type == 'alex'): - net_type = alexnet - self.chns = [64, 192, 384, 256, 256] - elif (self.pnet_type == 'squeeze'): - net_type = squeezenet - self.chns = [64, 128, 256, 384, 384, 512, 512] - self.L = len(self.chns) - - self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) - - if (lpips): - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] - if (self.pnet_type == 'squeeze'): # 7 layers for squeezenet - self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) - self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) - self.lins += [self.lin5, self.lin6] - - def forward(self, in0, in1, retPerLayer=False): - # v0.0 - original release had a bug, where input was not scaled - in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else ( - in0, in1) - outs0, outs1 = self.net(in0_input), self.net(in1_input) - feats0, feats1, diffs = {}, {}, {} - - for kk in range(self.L): - feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 - - if (self.lpips): - if (self.spatial): - res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] - else: - if (self.spatial): - res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)] - - val = res[0] - for l in range(1, self.L): - val += res[l] - - if (retPerLayer): - return (val, res) - else: - return val - - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) - self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - ''' A single linear layer which does a 1x1 conv ''' - - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - - layers = [nn.Dropout(), ] if (use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] - self.model = nn.Sequential(*layers) - - -class Dist2LogitLayer(nn.Module): - ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' - - def 
__init__(self, chn_mid=32, use_sigmoid=True): - super(Dist2LogitLayer, self).__init__() - - layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ] - layers += [nn.LeakyReLU(0.2, True), ] - layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ] - layers += [nn.LeakyReLU(0.2, True), ] - layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ] - if (use_sigmoid): - layers += [nn.Sigmoid(), ] - self.model = nn.Sequential(*layers) - - def forward(self, d0, d1, eps=0.1): - return self.model(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1)) - - -class BCERankingLoss(nn.Module): - def __init__(self, chn_mid=32): - super(BCERankingLoss, self).__init__() - self.net = Dist2LogitLayer(chn_mid=chn_mid) - # self.parameters = list(self.net.parameters()) - self.loss = torch.nn.BCELoss() - - def forward(self, d0, d1, judge): - per = (judge + 1.) / 2. - self.logit = self.net(d0, d1) - return self.loss(self.logit, per) - - -# L2, DSSIM metrics -class FakeNet(nn.Module): - def __init__(self, use_gpu=True, colorspace='Lab'): - super(FakeNet, self).__init__() - self.use_gpu = use_gpu - self.colorspace = colorspace - - -class L2(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert (in0.size()[0] == 1) # currently only supports batchSize 1 - - if (self.colorspace == 'RGB'): - (N, C, X, Y) = in0.size() - value = torch.mean(torch.mean(torch.mean((in0 - in1) ** 2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y), - dim=3).view(N) - return value - elif (self.colorspace == 'Lab'): - value = l2(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), - tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') - ret_var = Variable(torch.Tensor((value,))) - # if (self.use_gpu): - # ret_var = ret_var.cuda() - return ret_var - - -class DSSIM(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert (in0.size()[0] == 1) # currently only supports batchSize 1 - - if (self.colorspace == 'RGB'): - value = dssim(1. * tensor2im(in0.data), 1. 
* tensor2im(in1.data), range=255.).astype('float') - elif (self.colorspace == 'Lab'): - value = dssim(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), - tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') - ret_var = Variable(torch.Tensor((value,))) - # if (self.use_gpu): - # ret_var = ret_var.cuda() - return ret_var - - -def print_network(net): - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print('Network', net) - print('Total number of parameters: %d' % num_params) - - -############################################################ -# pretrained_networks.py # -############################################################ - -from collections import namedtuple -import torch -from torchvision import models as tv - - -class squeezenet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(squeezenet, self).__init__() - pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.slice6 = torch.nn.Sequential() - self.slice7 = torch.nn.Sequential() - self.N_slices = 7 - for x in range(2): - self.slice1.add_module(str(x), pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), pretrained_features[x]) - for x in range(10, 11): - self.slice5.add_module(str(x), pretrained_features[x]) - for x in range(11, 12): - self.slice6.add_module(str(x), pretrained_features[x]) - for x in range(12, 13): - self.slice7.add_module(str(x), pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - h = self.slice6(h) - h_relu6 = h - h = self.slice7(h) - h_relu7 = h - vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7']) - out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7) - - return out - - -class alexnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(alexnet, self).__init__() - alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(2): - self.slice1.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(10, 12): - self.slice5.add_module(str(x), alexnet_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = 
self.slice5(h) - h_relu5 = h - alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) - out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) - - return out - - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - - return out - - -class resnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True, num=18): - super(resnet, self).__init__() - if (num == 18): - self.net = tv.resnet18(pretrained=pretrained) - elif (num == 34): - self.net = tv.resnet34(pretrained=pretrained) - elif (num == 50): - self.net = tv.resnet50(pretrained=pretrained) - elif (num == 101): - self.net = tv.resnet101(pretrained=pretrained) - elif (num == 152): - self.net = tv.resnet152(pretrained=pretrained) - self.N_slices = 5 - - self.conv1 = self.net.conv1 - self.bn1 = self.net.bn1 - self.relu = self.net.relu - self.maxpool = self.net.maxpool - self.layer1 = self.net.layer1 - self.layer2 = self.net.layer2 - self.layer3 = self.net.layer3 - self.layer4 = self.net.layer4 - - def forward(self, X): - h = self.conv1(X) - h = self.bn1(h) - h = self.relu(h) - h_relu1 = h - h = self.maxpool(h) - h = self.layer1(h) - h_conv2 = h - h = self.layer2(h) - h_conv3 = h - h = self.layer3(h) - h_conv4 = h - h = self.layer4(h) - h_conv5 = h - - outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5']) - out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) - - return out diff --git a/spaces/jhwen/bingo/src/pages/api/sydney.ts b/spaces/jhwen/bingo/src/pages/api/sydney.ts deleted file mode 100644 index 8bd7074bc72bd2803e4acf89d3814908893ff044..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/src/pages/api/sydney.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - const id = headers['x-forwarded-for'] - - debug(id, headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - 
const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - debug(id, 'timeout') - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 3000) - closeDog.watch(() => { - debug(id, 'timeout close') - ws.close() - }, 20000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug(id, 'connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug(id, 'ws close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - debug(id, 'connection close') - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/jiawei011/dreamgaussian/mesh_utils.py b/spaces/jiawei011/dreamgaussian/mesh_utils.py deleted file mode 100644 index ca9fce9232f5133d6f91d5cf64d9e17b0725a5c9..0000000000000000000000000000000000000000 --- a/spaces/jiawei011/dreamgaussian/mesh_utils.py +++ /dev/null @@ -1,147 +0,0 @@ -import numpy as np -import pymeshlab as pml - - -def poisson_mesh_reconstruction(points, normals=None): - # points/normals: [N, 3] np.ndarray - - import open3d as o3d - - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(points) - - # outlier removal - pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10) - - # normals - if normals is None: - pcd.estimate_normals() - else: - pcd.normals = o3d.utility.Vector3dVector(normals[ind]) - - # visualize - o3d.visualization.draw_geometries([pcd], point_show_normal=False) - - mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson( - pcd, depth=9 - ) - vertices_to_remove = densities < np.quantile(densities, 0.1) - mesh.remove_vertices_by_mask(vertices_to_remove) - - # visualize - o3d.visualization.draw_geometries([mesh]) - - vertices = np.asarray(mesh.vertices) - triangles = np.asarray(mesh.triangles) - - print( - f"[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}" - ) - - return vertices, triangles - - -def decimate_mesh( - verts, faces, target, backend="pymeshlab", remesh=False, optimalplacement=True -): - # optimalplacement: default is True, but for flat mesh must turn False to prevent spike artifect. - - _ori_vert_shape = verts.shape - _ori_face_shape = faces.shape - - if backend == "pyfqmr": - import pyfqmr - - solver = pyfqmr.Simplify() - solver.setMesh(verts, faces) - solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False) - verts, faces, normals = solver.getMesh() - else: - m = pml.Mesh(verts, faces) - ms = pml.MeshSet() - ms.add_mesh(m, "mesh") # will copy! 
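        # The pymeshlab flow in this branch: wrap the numpy arrays in a pml.Mesh, add it
        # to a MeshSet (add_mesh copies the data), run filters on the set, then read the
        # result back with current_mesh().vertex_matrix() / face_matrix() below.
        # As a rough illustration with made-up numbers, shrinking a 10000-face mesh to
        # roughly a quarter of its size means calling decimate_mesh(verts, faces, target=2500),
        # which becomes targetfacenum=2500 in the quadric edge-collapse filter; per the note
        # at the top of this function, optimalplacement=False is the setting to use for
        # flat meshes.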
- - # filters - # ms.meshing_decimation_clustering(threshold=pml.Percentage(1)) - ms.meshing_decimation_quadric_edge_collapse( - targetfacenum=int(target), optimalplacement=optimalplacement - ) - - if remesh: - # ms.apply_coord_taubin_smoothing() - ms.meshing_isotropic_explicit_remeshing( - iterations=3, targetlen=pml.Percentage(1) - ) - - # extract mesh - m = ms.current_mesh() - verts = m.vertex_matrix() - faces = m.face_matrix() - - print( - f"[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}" - ) - - return verts, faces - - -def clean_mesh( - verts, - faces, - v_pct=1, - min_f=64, - min_d=20, - repair=True, - remesh=True, - remesh_size=0.01, -): - # verts: [N, 3] - # faces: [N, 3] - - _ori_vert_shape = verts.shape - _ori_face_shape = faces.shape - - m = pml.Mesh(verts, faces) - ms = pml.MeshSet() - ms.add_mesh(m, "mesh") # will copy! - - # filters - ms.meshing_remove_unreferenced_vertices() # verts not refed by any faces - - if v_pct > 0: - ms.meshing_merge_close_vertices( - threshold=pml.Percentage(v_pct) - ) # 1/10000 of bounding box diagonal - - ms.meshing_remove_duplicate_faces() # faces defined by the same verts - ms.meshing_remove_null_faces() # faces with area == 0 - - if min_d > 0: - ms.meshing_remove_connected_component_by_diameter( - mincomponentdiag=pml.Percentage(min_d) - ) - - if min_f > 0: - ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f) - - if repair: - # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True) - ms.meshing_repair_non_manifold_edges(method=0) - ms.meshing_repair_non_manifold_vertices(vertdispratio=0) - - if remesh: - # ms.apply_coord_taubin_smoothing() - ms.meshing_isotropic_explicit_remeshing( - iterations=3, targetlen=pml.AbsoluteValue(remesh_size) - ) - - # extract mesh - m = ms.current_mesh() - verts = m.vertex_matrix() - faces = m.face_matrix() - - print( - f"[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}" - ) - - return verts, faces diff --git a/spaces/jmyungjoon/cartoon/network/__init__.py b/spaces/jmyungjoon/cartoon/network/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py deleted file mode 100644 index d0d3742e829121099f7808ba6a68f3a9f0803e5f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/video_audio.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Video audio parser. - -Contains parsers for mp3, mp4 files. - -""" -from pathlib import Path -from typing import Any, Dict, cast - -from gpt_index.readers.file.base_parser import BaseParser - - -class VideoAudioParser(BaseParser): - """Video audio parser. - - Extract text from transcript of video/audio files. 
- - """ - - def __init__(self, *args: Any, model_version: str = "base", **kwargs: Any) -> None: - """Init params.""" - super().__init__(*args, **kwargs) - self._model_version = model_version - - def _init_parser(self) -> Dict: - """Init parser.""" - try: - import whisper - except ImportError: - raise ImportError( - "Please install OpenAI whisper model " - "'pip install git+https://github.com/openai/whisper.git' " - "to use the model" - ) - - model = whisper.load_model(self._model_version) - - return {"model": model} - - def parse_file(self, file: Path, errors: str = "ignore") -> str: - """Parse file.""" - import whisper - - if file.name.endswith("mp4"): - try: - from pydub import AudioSegment # noqa: F401 - except ImportError: - raise ImportError("Please install pydub 'pip install pydub' ") - # open file - video = AudioSegment.from_file(file, format="mp4") - - # Extract audio from video - audio = video.split_to_mono()[0] - - file_str = str(file)[:-4] + ".mp3" - # export file - audio.export(file_str, format="mp3") - - model = cast(whisper.Whisper, self.parser_config["model"]) - result = model.transcribe(str(file)) - - transcript = result["text"] - - return transcript diff --git a/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py b/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py deleted file mode 100644 index 8d4bcb75ebe1b4419443669b91da7e3bfe5704c2..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/stable-diffusion-gui-test/modules/runtime.py +++ /dev/null @@ -1,682 +0,0 @@ -import json -import os, re -import traceback -import torch -import numpy as np -from omegaconf import OmegaConf -from PIL import Image, ImageOps -from tqdm import tqdm, trange -from itertools import islice -from einops import rearrange -import time -from pytorch_lightning import seed_everything -from torch import autocast -from contextlib import nullcontext -from einops import rearrange, repeat -from ldmlib.util import instantiate_from_config -from optimizedSD.optimUtils import split_weighted_subprompts -from transformers import logging - -from gfpgan import GFPGANer -from basicsr.archs.rrdbnet_arch import RRDBNet -from realesrgan import RealESRGANer - -import uuid - -logging.set_verbosity_error() - -# consts -config_yaml = "optimizedSD/v1-inference.yaml" -filename_regex = re.compile('[^a-zA-Z0-9]') - -# api stuff -from sd_internal import Request, Response, Image as ResponseImage -import base64 -from io import BytesIO -#from colorama import Fore - -# local -stop_processing = False -temp_images = {} - -ckpt_file = None -gfpgan_file = None -real_esrgan_file = None - -model = None -modelCS = None -modelFS = None -model_gfpgan = None -model_real_esrgan = None - -model_is_half = False -model_fs_is_half = False -device = None -unet_bs = 1 -precision = 'autocast' -sampler_plms = None -sampler_ddim = None - -has_valid_gpu = False -force_full_precision = False -try: - gpu = torch.cuda.current_device() - gpu_name = torch.cuda.get_device_name(gpu) - print('GPU detected: ', gpu_name) - - force_full_precision = ('nvidia' in gpu_name.lower() or 'geforce' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images - if force_full_precision: - print('forcing full precision on NVIDIA 16xx cards, to avoid green images. 
GPU detected: ', gpu_name) - - mem_free, mem_total = torch.cuda.mem_get_info(gpu) - mem_total /= float(10**9) - if mem_total < 3.0: - print("GPUs with less than 3 GB of VRAM are not compatible with Stable Diffusion") - raise Exception() - - has_valid_gpu = True -except: - print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!') - pass - -def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast'): - global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half - - device = device_to_use if has_valid_gpu else 'cpu' - precision = precision_to_use if not force_full_precision else 'full' - unet_bs = unet_bs_to_use - - unload_model() - - if device == 'cpu': - precision = 'full' - - sd = load_model_from_config(f"{ckpt_to_use}.ckpt") - li, lo = [], [] - for key, value in sd.items(): - sp = key.split(".") - if (sp[0]) == "model": - if "input_blocks" in sp: - li.append(key) - elif "middle_block" in sp: - li.append(key) - elif "time_embed" in sp: - li.append(key) - else: - lo.append(key) - for key in li: - sd["model1." + key[6:]] = sd.pop(key) - for key in lo: - sd["model2." + key[6:]] = sd.pop(key) - - config = OmegaConf.load(f"{config_yaml}") - - model = instantiate_from_config(config.modelUNet) - _, _ = model.load_state_dict(sd, strict=False) - model.eval() - model.cdevice = device - model.unet_bs = unet_bs - model.turbo = turbo - - modelCS = instantiate_from_config(config.modelCondStage) - _, _ = modelCS.load_state_dict(sd, strict=False) - modelCS.eval() - modelCS.cond_stage_model.device = device - - modelFS = instantiate_from_config(config.modelFirstStage) - _, _ = modelFS.load_state_dict(sd, strict=False) - modelFS.eval() - del sd - - if device != "cpu" and precision == "autocast": - model.half() - modelCS.half() - modelFS.half() - model_is_half = True - model_fs_is_half = True - else: - model_is_half = False - model_fs_is_half = False - - ckpt_file = ckpt_to_use - - print('loaded ', ckpt_file, 'to', device, 'precision', precision) - -def unload_model(): - global model, modelCS, modelFS - - if model is not None: - del model - del modelCS - del modelFS - - model = None - modelCS = None - modelFS = None - -def load_model_gfpgan(gfpgan_to_use): - global gfpgan_file, model_gfpgan - - if gfpgan_to_use is None: - return - - gfpgan_file = gfpgan_to_use - model_path = gfpgan_to_use + ".pth" - - if device == 'cpu': - model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cpu')) - else: - model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cuda')) - - print('loaded ', gfpgan_to_use, 'to', device, 'precision', precision) - -def load_model_real_esrgan(real_esrgan_to_use): - global real_esrgan_file, model_real_esrgan - - if real_esrgan_to_use is None: - return - - real_esrgan_file = real_esrgan_to_use - model_path = real_esrgan_to_use + ".pth" - - RealESRGAN_models = { - 'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4), - 'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - } - - model_to_use = RealESRGAN_models[real_esrgan_to_use] - - if device == 'cpu': - model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half - 
model_real_esrgan.device = torch.device('cpu') - model_real_esrgan.model.to('cpu') - else: - model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=model_is_half) - - model_real_esrgan.model.name = real_esrgan_to_use - - print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision) - -def mk_img(req: Request): - try: - yield from do_mk_img(req) - except Exception as e: - print(traceback.format_exc()) - - gc() - - if device != "cpu": - modelFS.to("cpu") - modelCS.to("cpu") - - model.model1.to("cpu") - model.model2.to("cpu") - - gc() - - yield json.dumps({ - "status": 'failed', - "detail": str(e) - }) - -def do_mk_img(req: Request): - global ckpt_file - global model, modelCS, modelFS, device - global model_gfpgan, model_real_esrgan - global stop_processing - - stop_processing = False - - res = Response() - res.request = req - res.images = [] - - temp_images.clear() - - # custom model support: - # the req.use_stable_diffusion_model needs to be a valid path - # to the ckpt file (without the extension). - - needs_model_reload = False - ckpt_to_use = ckpt_file - if ckpt_to_use != req.use_stable_diffusion_model: - ckpt_to_use = req.use_stable_diffusion_model - needs_model_reload = True - - model.turbo = req.turbo - if req.use_cpu: - if device != 'cpu': - device = 'cpu' - - if model_is_half: - load_model_ckpt(ckpt_to_use, device) - needs_model_reload = False - - load_model_gfpgan(gfpgan_file) - load_model_real_esrgan(real_esrgan_file) - else: - if has_valid_gpu: - prev_device = device - device = 'cuda' - - if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \ - (precision == 'full' and not req.use_full_precision and not force_full_precision): - - load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast')) - needs_model_reload = False - - if prev_device != device: - load_model_gfpgan(gfpgan_file) - load_model_real_esrgan(real_esrgan_file) - - if needs_model_reload: - load_model_ckpt(ckpt_to_use, device, req.turbo, unet_bs, precision) - - if req.use_face_correction != gfpgan_file: - load_model_gfpgan(req.use_face_correction) - - if req.use_upscale != real_esrgan_file: - load_model_real_esrgan(req.use_upscale) - - model.cdevice = device - modelCS.cond_stage_model.device = device - - opt_prompt = req.prompt - opt_seed = req.seed - opt_n_samples = req.num_outputs - opt_n_iter = 1 - opt_scale = req.guidance_scale - opt_C = 4 - opt_H = req.height - opt_W = req.width - opt_f = 8 - opt_ddim_steps = req.num_inference_steps - opt_ddim_eta = 0.0 - opt_strength = req.prompt_strength - opt_save_to_disk_path = req.save_to_disk_path - opt_init_img = req.init_image - opt_use_face_correction = req.use_face_correction - opt_use_upscale = req.use_upscale - opt_show_only_filtered = req.show_only_filtered_image - opt_format = req.output_format - opt_sampler_name = req.sampler - - print(req.to_string(), '\n device', device) - - print('\n\n Using precision:', precision) - - seed_everything(opt_seed) - - batch_size = opt_n_samples - prompt = opt_prompt - assert prompt is not None - data = [batch_size * [prompt]] - - if precision == "autocast" and device != "cpu": - precision_scope = autocast - else: - precision_scope = nullcontext - - mask = None - - if req.init_image is None: - handler = _txt2img - - init_latent = None - t_enc = None - else: - handler = _img2img - - init_image = load_img(req.init_image, opt_W, opt_H) - init_image = init_image.to(device) - - if device != "cpu" and 
precision == "autocast": - init_image = init_image.half() - - modelFS.to(device) - - init_image = repeat(init_image, '1 ... -> b ...', b=batch_size) - init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space - - if req.mask is not None: - mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device) - mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0) - mask = repeat(mask, '1 ... -> b ...', b=batch_size) - - if device != "cpu" and precision == "autocast": - mask = mask.half() - - move_fs_to_cpu() - - assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]' - t_enc = int(opt_strength * opt_ddim_steps) - print(f"target t_enc is {t_enc} steps") - - if opt_save_to_disk_path is not None: - session_out_path = os.path.join(opt_save_to_disk_path, req.session_id) - os.makedirs(session_out_path, exist_ok=True) - else: - session_out_path = None - - seeds = "" - with torch.no_grad(): - for n in trange(opt_n_iter, desc="Sampling"): - for prompts in tqdm(data, desc="data"): - - with precision_scope("cuda"): - modelCS.to(device) - uc = None - if opt_scale != 1.0: - uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt]) - if isinstance(prompts, tuple): - prompts = list(prompts) - - subprompts, weights = split_weighted_subprompts(prompts[0]) - if len(subprompts) > 1: - c = torch.zeros_like(uc) - totalWeight = sum(weights) - # normalize each "sub prompt" and add it - for i in range(len(subprompts)): - weight = weights[i] - # if not skip_normalize: - weight = weight / totalWeight - c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight) - else: - c = modelCS.get_learned_conditioning(prompts) - - modelFS.to(device) - - partial_x_samples = None - def img_callback(x_samples, i): - nonlocal partial_x_samples - - partial_x_samples = x_samples - - if req.stream_progress_updates: - n_steps = opt_ddim_steps if req.init_image is None else t_enc - progress = {"step": i, "total_steps": n_steps} - - if req.stream_image_progress and i % 5 == 0: - partial_images = [] - - for i in range(batch_size): - x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - x_sample = x_sample.astype(np.uint8) - img = Image.fromarray(x_sample) - buf = BytesIO() - img.save(buf, format='JPEG') - buf.seek(0) - - del img, x_sample, x_samples_ddim - # don't delete x_samples, it is used in the code that called this callback - - temp_images[str(req.session_id) + '/' + str(i)] = buf - partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'}) - - progress['output'] = partial_images - - yield json.dumps(progress) - - if stop_processing: - raise UserInitiatedStop("User requested that we stop processing") - - # run the handler - try: - if handler == _txt2img: - x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name) - else: - x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask) - - yield from x_samples - - x_samples = partial_x_samples - except UserInitiatedStop: - if partial_x_samples is None: - continue - - x_samples = partial_x_samples - - print("saving images") - for i in range(batch_size): - - x_samples_ddim = 
modelFS.decode_first_stage(x_samples[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - x_sample = x_sample.astype(np.uint8) - img = Image.fromarray(x_sample) - - has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \ - (opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN')) - - return_orig_img = not has_filters or not opt_show_only_filtered - - if stop_processing: - return_orig_img = True - - if opt_save_to_disk_path is not None: - prompt_flattened = filename_regex.sub('_', prompts[0]) - prompt_flattened = prompt_flattened[:50] - - img_id = str(uuid.uuid4())[-8:] - - file_path = f"{prompt_flattened}_{img_id}" - img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}") - meta_out_path = os.path.join(session_out_path, f"{file_path}.txt") - - if return_orig_img: - save_image(img, img_out_path) - - save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file) - - if return_orig_img: - img_data = img_to_base64_str(img, opt_format) - res_image_orig = ResponseImage(data=img_data, seed=opt_seed) - res.images.append(res_image_orig) - - if opt_save_to_disk_path is not None: - res_image_orig.path_abs = img_out_path - - del img - - if has_filters and not stop_processing: - print('Applying filters..') - - gc() - filters_applied = [] - - if opt_use_face_correction: - _, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True) - x_sample = output[:,:,::-1] - filters_applied.append(opt_use_face_correction) - - if opt_use_upscale: - output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1]) - x_sample = output[:,:,::-1] - filters_applied.append(opt_use_upscale) - - filtered_image = Image.fromarray(x_sample) - - filtered_img_data = img_to_base64_str(filtered_image, opt_format) - res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed) - res.images.append(res_image_filtered) - - filters_applied = "_".join(filters_applied) - - if opt_save_to_disk_path is not None: - filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}") - save_image(filtered_image, filtered_img_out_path) - res_image_filtered.path_abs = filtered_img_out_path - - del filtered_image - - seeds += str(opt_seed) + "," - opt_seed += 1 - - move_fs_to_cpu() - gc() - del x_samples, x_samples_ddim, x_sample - print("memory_final = ", torch.cuda.memory_allocated() / 1e6) - - print('Task completed') - - yield json.dumps(res.json()) - -def save_image(img, img_out_path): - try: - img.save(img_out_path) - except: - print('could not save the file', traceback.format_exc()) - -def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file): - metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}" - - try: - with open(meta_out_path, 'w') as f: - f.write(metadata) - except: - print('could not save the 
file', traceback.format_exc()) - -def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name): - shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f] - - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelCS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - if sampler_name == 'ddim': - model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False) - - samples_ddim = model.sample( - S=opt_ddim_steps, - conditioning=c, - seed=opt_seed, - shape=shape, - verbose=False, - unconditional_guidance_scale=opt_scale, - unconditional_conditioning=uc, - eta=opt_ddim_eta, - x_T=start_code, - img_callback=img_callback, - mask=mask, - sampler = sampler_name, - ) - - yield from samples_ddim - -def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask): - # encode (scaled latent) - z_enc = model.stochastic_encode( - init_latent, - torch.tensor([t_enc] * batch_size).to(device), - opt_seed, - opt_ddim_eta, - opt_ddim_steps, - ) - x_T = None if mask is None else init_latent - - # decode it - samples_ddim = model.sample( - t_enc, - c, - z_enc, - unconditional_guidance_scale=opt_scale, - unconditional_conditioning=uc, - img_callback=img_callback, - mask=mask, - x_T=x_T, - sampler = 'ddim' - ) - - yield from samples_ddim - -def move_fs_to_cpu(): - if device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelFS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - -def gc(): - if device == 'cpu': - return - - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - -# internal - -def chunk(it, size): - it = iter(it) - return iter(lambda: tuple(islice(it, size)), ()) - - -def load_model_from_config(ckpt, verbose=False): - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - return sd - -# utils -class UserInitiatedStop(Exception): - pass - -def load_img(img_str, w0, h0): - image = base64_str_to_img(img_str).convert("RGB") - w, h = image.size - print(f"loaded input image of size ({w}, {h}) from base64") - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64 - image = image.resize((w, h), resample=Image.Resampling.LANCZOS) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.*image - 1. 
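# A small round-trip sketch for the image helpers in this module: encode a PIL image
# with img_to_base64_str() (defined further below) and feed the string back through
# load_img(). The 513 x 769 size is a made-up example chosen to show the
# snap-to-multiple-of-64 step (513 -> 512, 769 -> 768); the returned tensor is NCHW
# and scaled to [-1, 1], matching what do_mk_img() expects for an init image.
def _load_img_sketch():
    img = Image.new('RGB', (513, 769), color=(127, 127, 127))
    img_str = img_to_base64_str(img, output_format='PNG')
    init_tensor = load_img(img_str, 513, 769)  # resized to 512 x 768 before conversion
    return init_tensor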
- -def load_mask(mask_str, h0, w0, newH, newW, invert=False): - image = base64_str_to_img(mask_str).convert("RGB") - w, h = image.size - print(f"loaded input mask of size ({w}, {h})") - - if invert: - print("inverted") - image = ImageOps.invert(image) - # where_0, where_1 = np.where(image == 0), np.where(image == 255) - # image[where_0], image[where_1] = 255, 0 - - if h0 is not None and w0 is not None: - h, w = h0, w0 - - w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64 - - print(f"New mask size ({w}, {h})") - image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS) - image = np.array(image) - - image = image.astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return image - -# https://stackoverflow.com/a/61114178 -def img_to_base64_str(img, output_format="PNG"): - buffered = BytesIO() - img.save(buffered, format=output_format) - buffered.seek(0) - img_byte = buffered.getvalue() - img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode() - return img_str - -def base64_str_to_img(img_str): - img_str = img_str[len("data:image/png;base64,"):] - data = base64.b64decode(img_str) - buffered = BytesIO(data) - img = Image.open(buffered) - return img diff --git a/spaces/jonas/sdg-policy-tracing/analyse_site.py b/spaces/jonas/sdg-policy-tracing/analyse_site.py deleted file mode 100644 index c6d702c678e16023d9b0d6613f5dbef2ce6b7f93..0000000000000000000000000000000000000000 --- a/spaces/jonas/sdg-policy-tracing/analyse_site.py +++ /dev/null @@ -1,43 +0,0 @@ -import streamlit as st - -import glob, os, sys; sys.path.append('/src') -#import helper -from src import preprocessing as pre -from src import cleaning as clean - -def app(): - # Sidebar - st.sidebar.title('Analyse Policy Document') - - # Container - with st.container(): - st.markdown("
SDSN X GIZ Policy Tracing
      ", - unsafe_allow_html=True) - - file = st.file_uploader('Upload PDF File', type=['pdf', 'docx', 'txt']) - - if file is not None: - st.write("Filename: ", file.name) - # text = [] - # with pdfplumber.open(file) as pdf: - # for page in pdf.pages: - # text.append(page.extract_text()) - # text_str = ' '.join([page for page in text]) - - # st.write('Number of pages:',len(pdf.pages)) - - # load document - docs = pre.load_document(file) - - # preprocess document - docs_processed, df, all_text = clean.preprocessing(docs) - - - - st.write('... ') - - else: - st.write(' ') - st.write(' ') - st.markdown("

      no PDF uploaded ...

      ", - unsafe_allow_html=True) diff --git a/spaces/jone/GFPGAN/setup.py b/spaces/jone/GFPGAN/setup.py deleted file mode 100644 index 474e9188aa2dc5c19614921760ce4ad99bd19c13..0000000000000000000000000000000000000000 --- a/spaces/jone/GFPGAN/setup.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages, setup - -import os -import subprocess -import time - -version_file = 'gfpgan/version.py' - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} -__version__ = '{}' -__gitsha__ = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) - - version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -def get_requirements(filename='requirements.txt'): - here = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(here, filename), 'r') as f: - requires = [line.replace('\n', '') for line in f.readlines()] - return requires - - -if __name__ == '__main__': - write_version_py() - setup( - name='gfpgan', - version=get_version(), - description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration', - long_description=readme(), - long_description_content_type='text/markdown', - author='Xintao Wang', - author_email='xintao.wang@outlook.com', - keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan', - url='https://github.com/TencentARC/GFPGAN', - include_package_data=True, - packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - ], - license='Apache License Version 2.0', - setup_requires=['cython', 'numpy'], - install_requires=get_requirements(), - zip_safe=False) diff --git a/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp b/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/spaces/joshen/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG 
decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. -// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. -#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. - static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE 
<< ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? (((~i) >> 31) & 0xFF) : (i)) - - // Compiler creates a fast path 1D IDCT for X non-zero columns - template - struct Row - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - // ACCESS_COL() will be optimized at compile time to either an array access, or 0. -#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0) - - const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS; - const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS); - pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS); - pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS); - pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS); - pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS); - pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS); - pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS); - pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS); - } - }; - - template <> - struct Row<0> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { -#ifdef _MSC_VER - pTemp; pSrc; -#endif - } - }; - - template <> - struct Row<1> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - const int dcval = (pSrc[0] << PASS1_BITS); - - pTemp[0] = dcval; - pTemp[1] = dcval; - pTemp[2] = dcval; - pTemp[3] = dcval; - pTemp[4] = dcval; - pTemp[5] = dcval; - pTemp[6] = dcval; - pTemp[7] = dcval; - } - }; - - // Compiler creates a fast path 1D IDCT for X non-zero rows - template - struct Col - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - // ACCESS_ROW() will be optimized at compile time to either an array access, or 0. -#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? 
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. - inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. 
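One detail worth calling out in `get_char` above: it never reports end-of-file to its callers. Once the real bytes are exhausted it alternately returns 0xFF and 0xD9, i.e. an endless run of EOI markers, and `stuff_char` (introduced by the comment above and defined next) lets a single byte be pushed back for re-reading. A toy Python model of that contract, with illustrative names only:

```python
class ByteSource:
    """Toy model of jpgd's get_char / stuff_char behaviour (not the decoder's API)."""
    def __init__(self, data: bytes):
        self.buf = list(data)
        self.tem = 0                       # mirrors m_tem_flag

    def get_char(self) -> int:
        if self.buf:
            return self.buf.pop(0)
        # Past the end of the stream: synthesize 0xFF 0xD9 (EOI) forever so a
        # truncated file can never make the decoder read past its input.
        self.tem ^= 1
        return 0xFF if self.tem else 0xD9

    def stuff_char(self, b: int) -> None:
        self.buf.insert(0, b)              # push one byte back for re-reading

src = ByteSource(b"\x12")
print([hex(src.get_char()) for _ in range(5)])   # ['0x12', '0xff', '0xd9', '0xff', '0xd9']
```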
- inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. - inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? 
(symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. - inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * 
F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - 
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. 
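Before the allocator teardown that the comment above introduces, a short note on the `F()`/`D()` macros used throughout the `DCT_Upsample` namespace: they are plain fixed-point arithmetic with `FRACT_BITS = 10`, where `F()` turns a float constant into a scaled integer and `D()` rounds a product back down. A quick Python check of that scheme (nothing here comes from the original file):

```python
FRACT_BITS = 10
SCALE = 1 << FRACT_BITS

def F(x: float) -> int:          # float constant -> fixed point (round to nearest)
    return int(x * SCALE + 0.5)

def D(i: int) -> int:            # fixed point product -> integer, with rounding
    return (i + (SCALE >> 1)) >> FRACT_BITS

# Multiply a sample by the upsampling constant 0.415735 in fixed point
# and compare against the floating-point result.
sample = 97
print(F(0.415735))               # 426
print(D(sample * F(0.415735)))   # 40
print(round(sample * 0.415735))  # 40
```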
- void jpeg_decoder::free_all_blocks() - { - m_pStream = NULL; - for (mem_block *b = m_pMem_blocks; b; ) - { - mem_block *n = b->m_pNext; - jpgd_free(b); - b = n; - } - m_pMem_blocks = NULL; - } - - // This method handles all errors. - // It could easily be changed to use C++ exceptions. - void jpeg_decoder::stop_decoding(jpgd_status status) - { - m_error_code = status; - free_all_blocks(); - longjmp(m_jmp_state, status); - - // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit - // that this function doesn't return, otherwise we get this error: - // - // error : function declared 'noreturn' should not return - exit(1); - } - - void *jpeg_decoder::alloc(size_t nSize, bool zero) - { - nSize = (JPGD_MAX(nSize, 1) + 3) & ~3; - char *rv = NULL; - for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext) - { - if ((b->m_used_count + nSize) <= b->m_size) - { - rv = b->m_data + b->m_used_count; - b->m_used_count += nSize; - break; - } - } - if (!rv) - { - int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047); - mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity); - if (!b) stop_decoding(JPGD_NOTENOUGHMEM); - b->m_pNext = m_pMem_blocks; m_pMem_blocks = b; - b->m_used_count = nSize; - b->m_size = capacity; - rv = b->m_data; - } - if (zero) memset(rv, 0, nSize); - return rv; - } - - void jpeg_decoder::word_clear(void *p, uint16 c, uint n) - { - uint8 *pD = (uint8*)p; - const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF; - while (n) - { - pD[0] = l; pD[1] = h; pD += 2; - n--; - } - } - - // Refill the input buffer. - // This method will sit in a loop until (A) the buffer is full or (B) - // the stream's read() method reports and end of file condition. - void jpeg_decoder::prep_in_buffer() - { - m_in_buf_left = 0; - m_pIn_buf_ofs = m_in_buf; - - if (m_eof_flag) - return; - - do - { - int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag); - if (bytes_read == -1) - stop_decoding(JPGD_STREAM_READ); - - m_in_buf_left += bytes_read; - } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag)); - - m_total_bytes_read += m_in_buf_left; - - // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid). - // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.) - word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64); - } - - // Read a Huffman code table. 
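The comment above introduces `read_dht_marker`, which walks a DHT segment as: a 16-bit length, then per table an 8-bit class/id byte, sixteen code-length counts, and the symbol values (the counts must sum to at most 255). A hedged Python sketch of the same walk over a DHT payload; it is a toy parser, not part of jpgd:

```python
def parse_dht(payload: bytes):
    """Walk one DHT marker payload (the 16-bit length field already stripped)."""
    tables, pos = [], 0
    while pos < len(payload):
        index = payload[pos]                      # high nibble: 0=DC, 1=AC; low nibble: table id
        counts = list(payload[pos + 1:pos + 17])  # number of codes of length 1..16
        nsyms = sum(counts)
        if nsyms > 255:
            raise ValueError("bad DHT counts")
        values = list(payload[pos + 17:pos + 17 + nsyms])
        tables.append({"ac": bool(index & 0x10), "id": index & 0x0F,
                       "counts": counts, "values": values})
        pos += 17 + nsyms
    return tables

# One tiny DC table: a single 2-bit code for symbol 0x05.
demo = bytes([0x00, 0, 1] + [0] * 14 + [0x05])
print(parse_dht(demo))
```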
- void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. - void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. - void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker. 
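`read_sof_marker` above pulls the frame header apart in a fixed order: 16-bit length, 8-bit sample precision (only 8 is accepted), image height, image width, component count, then an ident / sampling-factor / quant-table triple per component, with the length expected to equal `3 * components + 8`. The same layout in a few lines of Python, again purely illustrative:

```python
import struct

def parse_sof0(segment: bytes):
    """segment = SOF payload starting at the 16-bit length field."""
    length, precision, height, width, ncomps = struct.unpack(">HBHHB", segment[:8])
    if precision != 8:
        raise ValueError("only 8-bit precision is supported")
    if length != ncomps * 3 + 8:
        raise ValueError("bad SOF length")
    comps = []
    for i in range(ncomps):
        ident, sampling, quant = segment[8 + 3 * i: 11 + 3 * i]
        comps.append({"id": ident, "h": sampling >> 4, "v": sampling & 0x0F, "quant": quant})
    return width, height, comps

# 2x2-subsampled YCbCr, 640x480, three components.
demo = bytes([0x00, 0x11, 0x08, 0x01, 0xE0, 0x02, 0x80, 0x03,
              0x01, 0x22, 0x00,  0x02, 0x11, 0x01,  0x03, 0x11, 0x01])
print(parse_sof0(demo))   # (640, 480, [...])
```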
- void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. - int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. 
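"Priming the bit buffer" below means filling the 32-bit `m_bit_buf` with two 16-bit reads; `get_bits` then returns the top `num_bits` bits and refills from the input as bits are consumed. The sketch that follows is a simplified MSB-first bit reader in Python that captures the idea; it ignores marker handling and the exact refill mechanics of the original:

```python
class BitReader:
    """Simplified MSB-first bit reader (a rough stand-in for m_bit_buf / m_bits_left)."""
    def __init__(self, data: bytes):
        self.data = data
        self.pos = 0        # next byte index
        self.acc = 0        # bit accumulator
        self.count = 0      # bits currently held in the accumulator

    def get_bits(self, n: int) -> int:
        while self.count < n:
            byte = self.data[self.pos] if self.pos < len(self.data) else 0xFF  # crude padding past EOF
            self.pos += 1
            self.acc = (self.acc << 8) | byte
            self.count += 8
        self.count -= n
        return (self.acc >> self.count) & ((1 << n) - 1)

r = BitReader(bytes([0b10110010, 0b01000001]))
print(r.get_bits(3), r.get_bits(5), r.get_bits(8))   # 5 18 65
```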
- m_bits_left = 16; - m_bit_buf = 0; - - get_bits(16); - get_bits(16); - - for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++) - m_mcu_block_max_zag[i] = 64; - } - -#define SCALEBITS 16 -#define ONE_HALF ((int) 1 << (SCALEBITS-1)) -#define FIX(x) ((int) ((x) * (1L<> SCALEBITS; - m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS; - m_crg[i] = (-FIX(0.71414f)) * k; - m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF; - } - } - - // This method throws back into the stream any bytes that where read - // into the bit buffer during initial marker scanning. - void jpeg_decoder::fix_in_buffer() - { - // In case any 0xFF's where pulled into the buffer during marker scanning. - JPGD_ASSERT((m_bits_left & 7) == 0); - - if (m_bits_left == 16) - stuff_char( (uint8)(m_bit_buf & 0xFF)); - - if (m_bits_left >= 8) - stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF)); - - stuff_char((uint8)((m_bit_buf >> 16) & 0xFF)); - stuff_char((uint8)((m_bit_buf >> 24) & 0xFF)); - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - void jpeg_decoder::transform_mcu(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64; - - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - } - - static const uint8 s_max_rc[64] = - { - 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86, - 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136 - }; - - void jpeg_decoder::transform_mcu_expand(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64; - - // Y IDCT - int mcu_block; - for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - - // Chroma IDCT, with upsampling - jpgd_block_t temp_block[64]; - - for (int i = 0; i < 2; i++) - { - DCT_Upsample::Matrix44 P, Q, R, S; - - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1); - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64); - - switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1]) - { - case 1*16+1: - DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr); - break; - case 1*16+2: - DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr); - break; - case 2*16+2: - DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+2: - DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+3: - DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr); - break; - case 3*16+4: - DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr); - break; - case 4*16+4: - DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+4: - DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+5: - DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr); - break; - 
case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing. 
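`load_next_row` above pairs each zig-zag index with its quantizer entry (`p[g_ZAG[i]] *= q[i]`) and records the last non-zero index so the IDCT can skip empty rows and columns. A small Python rendering of that dequantization pairing, reusing the `g_ZAG` table defined near the top of jpgd.cpp; the function itself is only illustrative:

```python
G_ZAG = [0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,
         7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,
         39,46,53,60,61,54,47,55,62,63]

def dequantize_block(coeffs, quant):
    """coeffs: 64 coefficients in zig-zag scan order; quant: 64 quantizer entries, also
    in zig-zag order (as read_dqt_marker stores them). Returns the 8x8 block in natural
    row-major order plus the 1-based index of the last non-zero zig-zag coefficient."""
    block = [0] * 64
    max_zag = 0
    for i in range(64):
        if coeffs[i]:
            max_zag = i + 1
            block[G_ZAG[i]] = coeffs[i] * quant[i]
    return block, max_zag

coeffs = [12, -3, 5] + [0] * 61          # only the first three zig-zag positions are set
quant = [16] * 64
block, max_zag = dequantize_block(coeffs, quant)
print(max_zag, block[0], block[1], block[8])   # 3 192 -48 80
```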
- void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundry - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. - void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format == 
ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = 
clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. - //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. 
- if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. - void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - - code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for 
this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. - void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. Determines if the number of components or sampling factors - // are supported. 
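// [Editorial aside - a hypothetical summary helper, not part of the original jpgd source
//  shown in this diff. It tabulates the luma sampling factors that init_frame() below
//  accepts for 3-component images (chroma must be 1x1) and the MCU geometry each implies.]
struct McuLayout { int blocks_per_mcu; int mcu_width; int mcu_height; };

static bool lookup_mcu_layout(int luma_h_samp, int luma_v_samp, McuLayout& out)
{
    if (luma_h_samp == 1 && luma_v_samp == 1) { out = { 3,  8,  8 }; return true; } // JPGD_YH1V1
    if (luma_h_samp == 2 && luma_v_samp == 1) { out = { 4, 16,  8 }; return true; } // JPGD_YH2V1
    if (luma_h_samp == 1 && luma_v_samp == 2) { out = { 4,  8, 16 }; return true; } // JPGD_YH1V2
    if (luma_h_samp == 2 && luma_v_samp == 2) { out = { 6, 16, 16 }; return true; } // JPGD_YH2V2
    return false; // any other combination is rejected with JPGD_UNSUPPORTED_SAMP_FACTORS
}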
- void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. - m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
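// [Editorial aside - a hypothetical helper, not part of the original jpgd source shown in
//  this diff. It spells out the addressing scheme used by coeff_buf_getp() below: the
//  coefficient buffer is one flat, row-major grid of block_num_x * block_num_y blocks,
//  each block_size bytes long.]
#include <cstdint>

static std::uint8_t* coeff_block_address(std::uint8_t* pData, int block_size,
                                         int block_num_x, int block_x, int block_y)
{
    // Advance block_size bytes per column and block_size * block_num_x bytes per row.
    return pData + block_x * block_size + block_y * (block_size * block_num_x);
}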
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if 
(*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. - void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers. 
- for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; - 
return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// EMD EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git a/spaces/joushe/moe-tts/mel_processing.py 
b/spaces/joushe/moe-tts/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/joushe/moe-tts/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py b/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py deleted file mode 100644 index e28f7844dddd82f027881fdf35dbc4f9b0042f43..0000000000000000000000000000000000000000 --- a/spaces/jpdiazpardo/jpdiazpardo-whisper-tiny-metal/functions/charts.py +++ /dev/null @@ -1,34 +0,0 @@ -import plotly.graph_objects as go - -def add_emoji(emotions_list): - for s in range(len(emotions_list)): - if emotions_list[s]=="surprise": emotions_list[s]="surprise 😲" - elif emotions_list[s]=="joy": emotions_list[s]="joy 😀" - elif emotions_list[s]=="anger": emotions_list[s]="anger 🤬" - elif emotions_list[s]=="neutral": emotions_list[s]="neutral 😐" - elif emotions_list[s]=="disgust": emotions_list[s]="disgust 🤢" - elif emotions_list[s]=="fear": emotions_list[s]="fear 😨" - elif emotions_list[s]=="sadness": emotions_list[s]="sadness 😭" - else: print(s) - - return emotions_list - - -def spider_chart(dictionary): - - fig = go.Figure(data=go.Scatterpolar( - r=[round(v*100,2) for v in dictionary.values()], - theta= add_emoji([k for k in dictionary.keys()]), - fill='toself')) - - fig.update_layout( - polar=dict( - radialaxis=dict( - visible=True - ), - ), - showlegend=False, - width = 400, height = 400, - title = "Audio Sentiment Analysis", title_x=0.5) - - return fig \ No newline at end of file diff --git a/spaces/katanaml-org/sparrow-ui/views/data_review.py b/spaces/katanaml-org/sparrow-ui/views/data_review.py deleted file mode 100644 index d5d3c719ce7870e3cb2e5f227a57c34fd2e671cf..0000000000000000000000000000000000000000 --- a/spaces/katanaml-org/sparrow-ui/views/data_review.py +++ /dev/null @@ -1,165 +0,0 @@ -import streamlit as st -from natsort import natsorted -import os -from PIL import Image -import math -from streamlit_sparrow_labeling import st_sparrow_labeling -import json - - -class DataReview: - class Model: - # pageTitle = "Data Review" - subheader_2 = "Select" - subheader_3 = "Result" - selection_text = "File to review" - initial_msg = "Please select a file to review" - - img_file = None - - def set_image_file(self, img_file): - st.session_state['img_file_review'] = img_file - - def get_image_file(self): - if 'img_file_review' not in st.session_state: - return None - return st.session_state['img_file_review'] - - json_file = None - - def set_json_file(self, json_file): - st.session_state['json_file_review'] = json_file - - def get_json_file(self): - if 'json_file_review' not in st.session_state: - return None - return st.session_state['json_file_review'] - - def view(self, model, ui_width, device_type, device_width): - # st.title(model.pageTitle) - - with st.sidebar: - st.markdown("---") - st.subheader(model.subheader_2) - - # get list of files in inference directory - processed_file_names = self.get_processed_file_names('docs/inference/') - - if 'selection_index' not in st.session_state: - st.session_state['selection_index'] = 0 - selection_index = 0 - else: - selection_index = st.session_state['selection_index'] - - selection = st.selectbox(model.selection_text, processed_file_names, index=selection_index) - - selection_index = self.get_selection_index(selection, processed_file_names) - st.session_state['selection_index'] = selection_index - - img_file = "docs/inference/" + selection + ".jpg" - json_file = "docs/inference/" + selection + ".json" - - model.set_image_file(img_file) - 
model.set_json_file(json_file) - - if model.get_image_file() is not None: - doc_img = Image.open(model.get_image_file()) - doc_height = doc_img.height - doc_width = doc_img.width - - canvas_width, number_of_columns = self.canvas_available_width(ui_width, doc_width, device_type, - device_width) - - if number_of_columns > 1: - col1, col2 = st.columns([number_of_columns, 10 - number_of_columns]) - with col1: - pass - self.render_doc(model, doc_img, canvas_width, doc_height, doc_width) - with col2: - pass - self.render_results(model) - else: - pass - self.render_doc(model, doc_img, canvas_width, doc_height, doc_width) - self.render_results(model) - else: - st.title(model.initial_msg) - - - def get_processed_file_names(self, dir_name): - # get ordered list of files without file extension, excluding hidden files, with JSON extension only - file_names = [os.path.splitext(f)[0] for f in os.listdir(dir_name) if - os.path.isfile(os.path.join(dir_name, f)) and not f.startswith('.') and f.endswith('.json')] - file_names = natsorted(file_names) - return file_names - - def get_selection_index(self, file, files_list): - return files_list.index(file) - - def canvas_available_width(self, ui_width, doc_width, device_type, device_width): - doc_width_pct = (doc_width * 100) / ui_width - if doc_width_pct < 45: - canvas_width_pct = 37 - elif doc_width_pct < 55: - canvas_width_pct = 49 - else: - canvas_width_pct = 60 - - if ui_width > 700 and canvas_width_pct == 37 and device_type == "desktop": - return math.floor(canvas_width_pct * ui_width / 100), 4 - elif ui_width > 700 and canvas_width_pct == 49 and device_type == "desktop": - return math.floor(canvas_width_pct * ui_width / 100), 5 - elif ui_width > 700 and canvas_width_pct == 60 and device_type == "desktop": - return math.floor(canvas_width_pct * ui_width / 100), 6 - else: - if device_type == "desktop": - ui_width = device_width - math.floor((device_width * 22) / 100) - elif device_type == "mobile": - ui_width = device_width - math.floor((device_width * 13) / 100) - return ui_width, 1 - - - def render_doc(self, model, doc_img, canvas_width, doc_height, doc_width): - height = 1296 - width = 864 - - annotations_json = { - "meta": { - "version": "v0.1", - "split": "train", - "image_id": 0, - "image_size": { - "width": doc_width, - "height": doc_height - } - }, - "words": [] - } - - st_sparrow_labeling( - fill_color="rgba(0, 151, 255, 0.3)", - stroke_width=2, - stroke_color="rgba(0, 50, 255, 0.7)", - background_image=doc_img, - initial_rects=annotations_json, - height=height, - width=width, - drawing_mode="transform", - display_toolbar=False, - update_streamlit=False, - canvas_width=canvas_width, - doc_height=doc_height, - doc_width=doc_width, - image_rescale=True, - key="doc_annotation" + model.get_image_file() - ) - - def render_results(self, model): - json_file = model.get_json_file() - if json_file is not None: - with open(json_file) as f: - data_json = json.load(f) - st.subheader(model.subheader_3) - st.markdown("---") - st.json(data_json) - st.markdown("---") \ No newline at end of file diff --git a/spaces/kepl/gpt/run.py b/spaces/kepl/gpt/run.py deleted file mode 100644 index 3b9ca0f439c4dd6a791f7eed62d942d096562b61..0000000000000000000000000000000000000000 --- a/spaces/kepl/gpt/run.py +++ /dev/null @@ -1,48 +0,0 @@ -import secrets - -from server.bp import bp -from server.website import Website -from server.backend import Backend_Api -from server.babel import create_babel -from json import load -from flask import Flask - -if __name__ == '__main__': - - # 
Load configuration from config.json - config = load(open('config.json', 'r')) - site_config = config['site_config'] - url_prefix = config.pop('url_prefix') - - # Create the app - app = Flask(__name__) - app.secret_key = secrets.token_hex(16) - - # Set up Babel - create_babel(app) - - # Set up the website routes - site = Website(bp, url_prefix) - for route in site.routes: - bp.add_url_rule( - route, - view_func=site.routes[route]['function'], - methods=site.routes[route]['methods'], - ) - - # Set up the backend API routes - backend_api = Backend_Api(bp, config) - for route in backend_api.routes: - bp.add_url_rule( - route, - view_func=backend_api.routes[route]['function'], - methods=backend_api.routes[route]['methods'], - ) - - # Register the blueprint - app.register_blueprint(bp, url_prefix=url_prefix) - - # Run the Flask server - print(f"Running on {site_config['port']}{url_prefix}") - app.run(**site_config) - print(f"Closing port {site_config['port']}") diff --git a/spaces/keremberke/valorant-object-detection/README.md b/spaces/keremberke/valorant-object-detection/README.md deleted file mode 100644 index bbb470b3979a43eeb4acfcbb2dd0cd4b6ceb8f98..0000000000000000000000000000000000000000 --- a/spaces/keremberke/valorant-object-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Valorant Object Detection -emoji: 🎮 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md deleted file mode 100644 index 055aee0defe2c43a523ced48260242f0f99b7cea..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/arcface_torch/docs/speed_benchmark.md +++ /dev/null @@ -1,93 +0,0 @@ -## Test Training Speed - -- Test Commands - -You need to use the following two commands to test the Partial FC training performance. -The number of identites is **3 millions** (synthetic data), turn mixed precision training on, backbone is resnet50, -batch size is 1024. 
-```shell -# Model Parallel -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions -# Partial FC 0.1 -python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions_pfc -``` - -- GPU Memory - -``` -# (Model Parallel) gpustat -i -[0] Tesla V100-SXM2-32GB | 64'C, 94 % | 30338 / 32510 MB -[1] Tesla V100-SXM2-32GB | 60'C, 99 % | 28876 / 32510 MB -[2] Tesla V100-SXM2-32GB | 60'C, 99 % | 28872 / 32510 MB -[3] Tesla V100-SXM2-32GB | 69'C, 99 % | 28872 / 32510 MB -[4] Tesla V100-SXM2-32GB | 66'C, 99 % | 28888 / 32510 MB -[5] Tesla V100-SXM2-32GB | 60'C, 99 % | 28932 / 32510 MB -[6] Tesla V100-SXM2-32GB | 68'C, 100 % | 28916 / 32510 MB -[7] Tesla V100-SXM2-32GB | 65'C, 99 % | 28860 / 32510 MB - -# (Partial FC 0.1) gpustat -i -[0] Tesla V100-SXM2-32GB | 60'C, 95 % | 10488 / 32510 MB │······················· -[1] Tesla V100-SXM2-32GB | 60'C, 97 % | 10344 / 32510 MB │······················· -[2] Tesla V100-SXM2-32GB | 61'C, 95 % | 10340 / 32510 MB │······················· -[3] Tesla V100-SXM2-32GB | 66'C, 95 % | 10340 / 32510 MB │······················· -[4] Tesla V100-SXM2-32GB | 65'C, 94 % | 10356 / 32510 MB │······················· -[5] Tesla V100-SXM2-32GB | 61'C, 95 % | 10400 / 32510 MB │······················· -[6] Tesla V100-SXM2-32GB | 68'C, 96 % | 10384 / 32510 MB │······················· -[7] Tesla V100-SXM2-32GB | 64'C, 95 % | 10328 / 32510 MB │······················· -``` - -- Training Speed - -```python -# (Model Parallel) trainging.log -Training: Speed 2271.33 samples/sec Loss 1.1624 LearningRate 0.2000 Epoch: 0 Global Step: 100 -Training: Speed 2269.94 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150 -Training: Speed 2272.67 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200 -Training: Speed 2266.55 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250 -Training: Speed 2272.54 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300 - -# (Partial FC 0.1) trainging.log -Training: Speed 5299.56 samples/sec Loss 1.0965 LearningRate 0.2000 Epoch: 0 Global Step: 100 -Training: Speed 5296.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150 -Training: Speed 5304.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200 -Training: Speed 5274.43 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250 -Training: Speed 5300.10 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300 -``` - -In this test case, Partial FC 0.1 only use1 1/3 of the GPU memory of the model parallel, -and the training speed is 2.5 times faster than the model parallel. - - -## Speed Benchmark - -1. Training speed of different parallel methods (samples/second), Tesla V100 32GB * 8. (Larger is better) - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 4681 | 4824 | 5004 | -|250000 | 4047 | 4521 | 4976 | -|500000 | 3087 | 4013 | 4900 | -|1000000 | 2090 | 3449 | 4803 | -|1400000 | 1672 | 3043 | 4738 | -|2000000 | - | 2593 | 4626 | -|4000000 | - | 1748 | 4208 | -|5500000 | - | 1389 | 3975 | -|8000000 | - | - | 3565 | -|16000000 | - | - | 2679 | -|29000000 | - | - | 1855 | - -2. GPU memory cost of different parallel methods (GB per GPU), Tesla V100 32GB * 8. 
(Smaller is better) - -| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | -| :--- | :--- | :--- | :--- | -|125000 | 7358 | 5306 | 4868 | -|250000 | 9940 | 5826 | 5004 | -|500000 | 14220 | 7114 | 5202 | -|1000000 | 23708 | 9966 | 5620 | -|1400000 | 32252 | 11178 | 6056 | -|2000000 | - | 13978 | 6472 | -|4000000 | - | 23238 | 8284 | -|5500000 | - | 32188 | 9854 | -|8000000 | - | - | 12310 | -|16000000 | - | - | 19950 | -|29000000 | - | - | 32324 | diff --git a/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py b/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py deleted file mode 100644 index c78324cbc08414fffcc689f325312de0e51bd6b4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/rvc-mlbb-v2/lib/infer_pack/onnx_inference.py +++ /dev/null @@ -1,143 +0,0 @@ -import onnxruntime -import librosa -import numpy as np -import soundfile - - -class ContentVec: - def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): - print("load model(s) from {}".format(vec_path)) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def __call__(self, wav): - return self.forward(wav) - - def forward(self, wav): - feats = wav - if feats.ndim == 2: # double channels - feats = feats.mean(-1) - assert feats.ndim == 1, feats.ndim - feats = np.expand_dims(np.expand_dims(feats, 0), 0) - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input)[0] - return logits.transpose(0, 2, 1) - - -def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs): - if f0_predictor == "pm": - from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor - - f0_predictor_object = PMF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "harvest": - from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor - - f0_predictor_object = HarvestF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "dio": - from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor - - f0_predictor_object = DioF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - else: - raise Exception("Unknown f0 predictor") - return f0_predictor_object - - -class OnnxRVC: - def __init__( - self, - model_path, - sr=40000, - hop_size=512, - vec_path="vec-768-layer-12", - device="cpu", - ): - vec_path = f"pretrained/{vec_path}.onnx" - self.vec_model = ContentVec(vec_path, device) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(model_path, providers=providers) - self.sampling_rate = sr - self.hop_size = hop_size - - def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): - onnx_input = { - self.model.get_inputs()[0].name: hubert, - self.model.get_inputs()[1].name: hubert_length, - self.model.get_inputs()[2].name: pitch, - self.model.get_inputs()[3].name: pitchf, - self.model.get_inputs()[4].name: 
ds, - self.model.get_inputs()[5].name: rnd, - } - return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) - - def inference( - self, - raw_path, - sid, - f0_method="dio", - f0_up_key=0, - pad_time=0.5, - cr_threshold=0.02, - ): - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0_predictor = get_f0_predictor( - f0_method, - hop_length=self.hop_size, - sampling_rate=self.sampling_rate, - threshold=cr_threshold, - ) - wav, sr = librosa.load(raw_path, sr=self.sampling_rate) - org_length = len(wav) - if org_length / sr > 50.0: - raise RuntimeError("Reached Max Length") - - wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) - wav16k = wav16k - - hubert = self.vec_model(wav16k) - hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) - hubert_length = hubert.shape[1] - - pitchf = f0_predictor.compute_f0(wav, hubert_length) - pitchf = pitchf * 2 ** (f0_up_key / 12) - pitch = pitchf.copy() - f0_mel = 1127 * np.log(1 + pitch / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - pitch = np.rint(f0_mel).astype(np.int64) - - pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) - pitch = pitch.reshape(1, len(pitch)) - ds = np.array([sid]).astype(np.int64) - - rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) - hubert_length = np.array([hubert_length]).astype(np.int64) - - out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() - out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") - return out_wav[0:org_length] diff --git a/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py b/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py deleted file mode 100644 index 12efb0629a2e3d0d746a34f467254536c2bdbe5f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/rvc-models-new/infer_pack/models_onnx_moess.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + 
self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = 
self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - 
with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = 
torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - 
filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class 
MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js b/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js deleted file mode 100644 index 461908c4fd68655a6f4cbc573b469fd97ed6ed8d..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/wav.js +++ /dev/null @@ -1,86 +0,0 @@ -/* -wav编码器+编码引擎 
-https://github.com/xiangyuecn/Recorder - -当然最佳推荐使用mp3、wav格式,代码也是优先照顾这两种格式 -浏览器支持情况 -https://developer.mozilla.org/en-US/docs/Web/HTML/Supported_media_formats - -编码原理:给pcm数据加上一个44直接的wav头即成wav文件;pcm数据就是Recorder中的buffers原始数据(重新采样),16位时为LE小端模式(Little Endian),实质上是未经过任何编码处理 -*/ -(function(){ -"use strict"; - -Recorder.prototype.enc_wav={ - stable:true - ,testmsg:"支持位数8位、16位(填在比特率里面),采样率取值无限制" -}; -Recorder.prototype.wav=function(res,True,False){ - var This=this,set=This.set - ,size=res.length - ,sampleRate=set.sampleRate - ,bitRate=set.bitRate==8?8:16; - - //编码数据 https://github.com/mattdiamond/Recorderjs https://www.cnblogs.com/blqw/p/3782420.html https://www.cnblogs.com/xiaoqi/p/6993912.html - var dataLength=size*(bitRate/8); - var buffer=new ArrayBuffer(44+dataLength); - var data=new DataView(buffer); - - var offset=0; - var writeString=function(str){ - for (var i=0;i>8)+128; - data.setInt8(offset,val,true); - }; - }else{ - for (var i=0;i list[tuple[str, str]]: - """Extract hyperlinks from a BeautifulSoup object - - Args: - soup (BeautifulSoup): The BeautifulSoup object - base_url (str): The base URL - - Returns: - List[Tuple[str, str]]: The extracted hyperlinks - """ - return [ - (link.text, urljoin(base_url, link["href"])) - for link in soup.find_all("a", href=True) - ] - - -def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: - """Format hyperlinks to be displayed to the user - - Args: - hyperlinks (List[Tuple[str, str]]): The hyperlinks to format - - Returns: - List[str]: The formatted hyperlinks - """ - return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md b/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md deleted file mode 100644 index 650275ed7c4cda12822587c6a4358f057fffe494..0000000000000000000000000000000000000000 --- a/spaces/kukuhtw/VToonify/vtoonify/model/raft/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# RAFT -This repository contains the source code for our paper: - -[RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)
      -ECCV 2020
      -Zachary Teed and Jia Deng
      - - - -## Requirements -The code has been tested with PyTorch 1.6 and Cuda 10.1. -```Shell -conda create --name raft -conda activate raft -conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch -``` - -## Demos -Pretrained models can be downloaded by running -```Shell -./download_models.sh -``` -or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing) - -You can demo a trained model on a sequence of frames -```Shell -python demo.py --model=models/raft-things.pth --path=demo-frames -``` - -## Required Data -To evaluate/train RAFT, you will need to download the required datasets. -* [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs) -* [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html) -* [Sintel](http://sintel.is.tue.mpg.de/) -* [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow) -* [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional) - - -By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder - -```Shell -├── datasets - ├── Sintel - ├── test - ├── training - ├── KITTI - ├── testing - ├── training - ├── devkit - ├── FlyingChairs_release - ├── data - ├── FlyingThings3D - ├── frames_cleanpass - ├── frames_finalpass - ├── optical_flow -``` - -## Evaluation -You can evaluate a trained model using `evaluate.py` -```Shell -python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision -``` - -## Training -We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` which can be visualized using tensorboard -```Shell -./train_standard.sh -``` - -If you have a RTX GPU, training can be accelerated using mixed precision. You can expect similiar results in this setting (1 GPU) -```Shell -./train_mixed.sh -``` - -## (Optional) Efficent Implementation -You can optionally use our alternate (efficent) implementation by compiling the provided cuda extension -```Shell -cd alt_cuda_corr && python setup.py install && cd .. -``` -and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag Note, this implementation is somewhat slower than all-pairs, but uses significantly less GPU memory during the forward pass. 
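The `--mixed_precision` flag and `train_mixed.sh` mentioned above rely on ordinary PyTorch automatic mixed precision. The snippet below is only a minimal, self-contained sketch of that pattern — the tiny conv model, batch shapes and L1 loss are stand-ins invented for illustration, not RAFT's actual training loop:

```python
import torch

# Toy stand-ins, NOT the RAFT implementation: a conv net mapping a stacked
# frame pair to a 2-channel "flow", trained on random tensors.
model = torch.nn.Conv2d(6, 2, 3, padding=1).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=4e-4)
scaler = torch.cuda.amp.GradScaler()  # rescales the loss so fp16 gradients don't underflow

for step in range(10):
    image_pair = torch.randn(2, 6, 256, 256, device="cuda")  # concatenated frame pairs
    flow_gt = torch.randn(2, 2, 256, 256, device="cuda")     # synthetic ground-truth flow
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():      # forward pass runs in mixed precision
        flow_pred = model(image_pair)
        loss = (flow_pred - flow_gt).abs().mean()
    scaler.scale(loss).backward()        # backward on the scaled loss
    scaler.step(optimizer)               # unscale gradients, then optimizer step
    scaler.update()
```

Dropping the `autocast`/`GradScaler` lines recovers the plain fp32 loop, which is presumably what running without `--mixed_precision` corresponds to.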
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py deleted file mode 100644 index caaad2cc7b96947857fe3ba3de903be65644bde6..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/__init__.py +++ /dev/null @@ -1,96 +0,0 @@ -import pkgutil - -import gradio.components as components -import gradio.inputs as inputs -import gradio.outputs as outputs -import gradio.processing_utils -import gradio.templates -import gradio.themes as themes -from gradio.blocks import Blocks -from gradio.components import ( - HTML, - JSON, - AnnotatedImage, - Annotatedimage, - Audio, - BarPlot, - Button, - Carousel, - Chatbot, - Checkbox, - CheckboxGroup, - Checkboxgroup, - Code, - ColorPicker, - DataFrame, - Dataframe, - Dataset, - Dropdown, - File, - Gallery, - Highlight, - HighlightedText, - Highlightedtext, - Image, - Interpretation, - Json, - Label, - LinePlot, - Markdown, - Model3D, - Number, - Plot, - Radio, - ScatterPlot, - Slider, - State, - StatusTracker, - Text, - Textbox, - TimeSeries, - Timeseries, - UploadButton, - Variable, - Video, - component, -) -from gradio.deploy_space import deploy -from gradio.events import SelectData -from gradio.exceptions import Error -from gradio.external import load -from gradio.flagging import ( - CSVLogger, - FlaggingCallback, - HuggingFaceDatasetJSONSaver, - HuggingFaceDatasetSaver, - SimpleCSVLogger, -) -from gradio.helpers import EventData, Progress, make_waveform, skip, update -from gradio.helpers import create_examples as Examples # noqa: N812 -from gradio.interface import Interface, TabbedInterface, close_all -from gradio.ipython_ext import load_ipython_extension -from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs -from gradio.mix import Parallel, Series -from gradio.routes import Request, mount_gradio_app -from gradio.templates import ( - Files, - ImageMask, - ImagePaint, - List, - Matrix, - Mic, - Microphone, - Numpy, - Paint, - Pil, - PlayableVideo, - Sketchpad, - TextArea, - Webcam, -) -from gradio.themes import Base as Theme - -current_pkg_version = ( - (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip() -) -__version__ = current_pkg_version diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css deleted file mode 100644 index 1feac101230266e476fc5f389f286813260505b5..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Blocks-005a10ea.css +++ /dev/null @@ -1 +0,0 @@ -.wrap.svelte-1i3r921.svelte-1i3r921{padding:var(--size-6)}.attention.svelte-1i3r921.svelte-1i3r921{font-weight:var(--weight-bold);font-size:var(--text-lg)}.attention.svelte-1i3r921 code.svelte-1i3r921{border:none;background:none;color:var(--color-accent);font-weight:var(--weight-bold)}button.svelte-1i3r921.svelte-1i3r921{position:absolute;top:var(--size-5);right:var(--size-6);width:var(--size-4);color:var(--body-text-color)}button.svelte-1i3r921.svelte-1i3r921:hover{color:var(--color-accent)}@media (min-width: 
768px){button.svelte-1i3r921.svelte-1i3r921{top:var(--size-6)}}h2.svelte-9i27qi.svelte-9i27qi{display:flex;color:var(--body-text-color);font-weight:var(--weight-semibold)}h2.svelte-9i27qi img.svelte-9i27qi{margin-right:var(--size-2);width:var(--size-4)}span.svelte-9i27qi.svelte-9i27qi{color:var(--color-accent)}button.svelte-9i27qi.svelte-9i27qi{position:absolute;top:var(--size-5);right:var(--size-6);width:var(--size-4);color:var(--body-text-color)}button.svelte-9i27qi.svelte-9i27qi:hover{color:var(--color-accent)}@media (min-width: 768px){button.svelte-9i27qi.svelte-9i27qi{top:var(--size-6)}h2.svelte-9i27qi img.svelte-9i27qi{width:var(--size-5)}}.counts.svelte-9i27qi.svelte-9i27qi{margin-top:auto;margin-right:var(--size-8);margin-bottom:auto;margin-left:auto;color:var(--body-text-color);font-weight:var(--weight-light)}.load-wrap.svelte-1c7hj3i{display:flex;justify-content:center;align-items:center}h4.svelte-1c7hj3i{display:flex;align-items:center;margin-top:var(--size-6);margin-bottom:var(--size-3);color:var(--body-text-color);font-weight:var(--weight-bold)}.toggle-icon.svelte-1c7hj3i{display:flex;align-items:center;margin-right:var(--size-2);border-radius:var(--radius-full);background:var(--color-grey-300);width:12px;height:4px}.toggle-dot.svelte-1c7hj3i{margin-left:auto;border-radius:var(--radius-full);background:var(--color-grey-700);width:6px;height:6px}.response-wrap.svelte-1c7hj3i{font-family:var(--font-mono)}.desc.svelte-1c7hj3i{color:var(--body-text-color-subdued)}.hide.svelte-1c7hj3i{display:none}.second-level.svelte-1c7hj3i{margin-left:var(--size-4)}code.svelte-1pu3gsl pre.svelte-1pu3gsl{overflow-x:auto;color:var(--body-text-color);font-family:var(--font-mono);tab-size:2}code.svelte-1pu3gsl.svelte-1pu3gsl{position:relative}.copy.svelte-1pu3gsl.svelte-1pu3gsl{position:absolute;top:0;right:0;margin-top:-5px;margin-right:-5px}h3.svelte-41kcm6{color:var(--body-text-color);font-weight:var(--section-header-text-weight);font-size:var(--text-lg)}.post.svelte-41kcm6{margin-right:var(--size-2);border:1px solid var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-bottom:var(--size-1);padding-left:var(--size-1);color:var(--color-accent);font-weight:var(--weight-semibold)}code.svelte-1bqxtsy pre.svelte-1bqxtsy{overflow-x:auto;color:var(--body-text-color);font-family:var(--font-mono);tab-size:2}.token.string.svelte-1bqxtsy.svelte-1bqxtsy{display:contents;color:var(--color-accent-base)}code.svelte-1bqxtsy.svelte-1bqxtsy{position:relative}.copy.svelte-1bqxtsy.svelte-1bqxtsy{position:absolute;top:0;right:0;margin-top:-5px;margin-right:-5px}.container.svelte-1bqxtsy.svelte-1bqxtsy{display:flex;flex-direction:column;gap:var(--spacing-xxl);margin-top:var(--size-3);margin-bottom:var(--size-3)}.error.svelte-1bqxtsy.svelte-1bqxtsy{color:var(--error-text-color)}.desc.svelte-1bqxtsy.svelte-1bqxtsy{color:var(--body-text-color-subdued)}.example-inputs.svelte-1bqxtsy.svelte-1bqxtsy{border:1px solid var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-left:var(--size-1);color:var(--color-accent)}.space.svelte-1j8n062{display:flex;flex-basis:1;margin-top:var(--size-4)}.banner-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{position:relative;border-bottom:1px solid var(--border-color-primary);padding:var(--size-4) var(--size-6);font-size:var(--text-md)}@media (min-width: 
768px){.banner-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{font-size:var(--text-xl)}}.docs-wrap.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;flex-direction:column;gap:var(--spacing-xxl)}.endpoint.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border-radius:var(--radius-md);background:var(--background-fill-primary);padding:var(--size-6);padding-top:var(--size-1);font-size:var(--text-md)}.client-doc.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{padding-top:var(--size-6);padding-right:var(--size-6);padding-left:var(--size-6);font-size:var(--text-xl)}.library.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border:1px solid var(--border-color-accent);border-radius:var(--radius-sm);background:var(--color-accent-soft);padding-right:var(--size-1);padding-bottom:var(--size-1);padding-left:var(--size-1);color:var(--color-accent)}.snippets.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;align-items:center;margin-bottom:var(--size-4)}.snippets.svelte-rzp0ym>.svelte-rzp0ym+.svelte-rzp0ym{margin-left:var(--size-2)}.snippet.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{display:flex;align-items:center;border:1px solid var(--border-color-primary);border-radius:var(--radius-md);padding:var(--size-1) var(--size-1-5);color:var(--body-text-color-subdued);color:var(--body-text-color);line-height:1;user-select:none;text-transform:capitalize}.current-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{border:1px solid var(--body-text-color-subdued);color:var(--body-text-color)}.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{cursor:pointer;color:var(--body-text-color-subdued)}.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym:hover,.inactive-lang.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym:focus{box-shadow:var(--shadow-drop);color:var(--body-text-color)}.snippet.svelte-rzp0ym img.svelte-rzp0ym.svelte-rzp0ym{margin-right:var(--size-1-5);width:var(--size-3)}.header.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{margin-top:var(--size-3);margin-bottom:var(--size-3);font-size:var(--text-xl)}.endpoint-container.svelte-rzp0ym.svelte-rzp0ym.svelte-rzp0ym{margin-top:var(--size-3);margin-bottom:var(--size-3);border:1px solid var(--border-color-primary);border-radius:var(--radius-xl);padding:var(--size-3);padding-top:0}.wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;flex-grow:1;flex-direction:column;width:var(--size-full);font-weight:var(--body-text-weight);font-size:var(--body-text-size)}footer.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;justify-content:center;margin-top:var(--size-4);color:var(--body-text-color-subdued)}footer.svelte-1lyswbr>.svelte-1lyswbr+.svelte-1lyswbr{margin-left:var(--size-2)}.show-api.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;align-items:center}.show-api.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr:hover{color:var(--body-text-color)}.show-api.svelte-1lyswbr img.svelte-1lyswbr.svelte-1lyswbr{margin-right:var(--size-1);margin-left:var(--size-2);width:var(--size-3)}.built-with.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;align-items:center}.built-with.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr:hover{color:var(--body-text-color)}.built-with.svelte-1lyswbr img.svelte-1lyswbr.svelte-1lyswbr{margin-right:var(--size-1);margin-left:var(--size-2);width:var(--size-3)}.api-docs.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{display:flex;position:fixed;top:0;right:0;z-index:var(--layer-5);background:rgba(0,0,0,.5);width:var(--size-screen);height:var(--size-screen-h)}.backdrop.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{flex:1 1 
0%;backdrop-filter:blur(4px)}.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{box-shadow:var(--shadow-drop-lg);background:var(--background-fill-primary);overflow-x:hidden;overflow-y:auto}@media (min-width: 768px){.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{border-top-left-radius:var(--radius-lg);border-bottom-left-radius:var(--radius-lg);width:950px}}@media (min-width: 1536px){.api-docs-wrap.svelte-1lyswbr.svelte-1lyswbr.svelte-1lyswbr{width:1150px}} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css deleted file mode 100644 index c47d6f6f010f0626b0036068fe41d683b37b2954..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/DropdownArrow-5fa4dd09.css +++ /dev/null @@ -1 +0,0 @@ -.dropdown-arrow.svelte-p5edak{fill:var(--body-text-color);margin-right:var(--size-2);width:var(--size-5)} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py deleted file mode 100644 index e5c6a83937cd68e3ae20a14d8babb9a99bd5a4f5..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/tests/test_afm.py +++ /dev/null @@ -1,137 +0,0 @@ -from io import BytesIO -import pytest -import logging - -from matplotlib import _afm -from matplotlib import font_manager as fm - - -# See note in afm.py re: use of comma as decimal separator in the -# UnderlineThickness field and re: use of non-ASCII characters in the Notice -# field. -AFM_TEST_DATA = b"""StartFontMetrics 2.0 -Comment Comments are ignored. -Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017 -FontName MyFont-Bold -EncodingScheme FontSpecific -FullName My Font Bold -FamilyName Test Fonts -Weight Bold -ItalicAngle 0.0 -IsFixedPitch false -UnderlinePosition -100 -UnderlineThickness 56,789 -Version 001.000 -Notice Copyright \xa9 2017 No one. -FontBBox 0 -321 1234 369 -StartCharMetrics 3 -C 0 ; WX 250 ; N space ; B 0 0 0 0 ; -C 42 ; WX 1141 ; N foo ; B 40 60 800 360 ; -C 99 ; WX 583 ; N bar ; B 40 -10 543 210 ; -EndCharMetrics -EndFontMetrics -""" - - -def test_nonascii_str(): - # This tests that we also decode bytes as utf-8 properly. - # Else, font files with non ascii characters fail to load. 
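- # Expected behaviour (added note): _afm._to_str should round-trip UTF-8, i.e. decoding the
- # encoded bytes of "привет" must give back the original Python str.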
- inp_str = "привет" - byte_str = inp_str.encode("utf8") - - ret = _afm._to_str(byte_str) - assert ret == inp_str - - -def test_parse_header(): - fh = BytesIO(AFM_TEST_DATA) - header = _afm._parse_header(fh) - assert header == { - b'StartFontMetrics': 2.0, - b'FontName': 'MyFont-Bold', - b'EncodingScheme': 'FontSpecific', - b'FullName': 'My Font Bold', - b'FamilyName': 'Test Fonts', - b'Weight': 'Bold', - b'ItalicAngle': 0.0, - b'IsFixedPitch': False, - b'UnderlinePosition': -100, - b'UnderlineThickness': 56.789, - b'Version': '001.000', - b'Notice': b'Copyright \xa9 2017 No one.', - b'FontBBox': [0, -321, 1234, 369], - b'StartCharMetrics': 3, - } - - -def test_parse_char_metrics(): - fh = BytesIO(AFM_TEST_DATA) - _afm._parse_header(fh) # position - metrics = _afm._parse_char_metrics(fh) - assert metrics == ( - {0: (250.0, 'space', [0, 0, 0, 0]), - 42: (1141.0, 'foo', [40, 60, 800, 360]), - 99: (583.0, 'bar', [40, -10, 543, 210]), - }, - {'space': (250.0, 'space', [0, 0, 0, 0]), - 'foo': (1141.0, 'foo', [40, 60, 800, 360]), - 'bar': (583.0, 'bar', [40, -10, 543, 210]), - }) - - -def test_get_familyname_guessed(): - fh = BytesIO(AFM_TEST_DATA) - font = _afm.AFM(fh) - del font._header[b'FamilyName'] # remove FamilyName, so we have to guess - assert font.get_familyname() == 'My Font' - - -def test_font_manager_weight_normalization(): - font = _afm.AFM(BytesIO( - AFM_TEST_DATA.replace(b"Weight Bold\n", b"Weight Custom\n"))) - assert fm.afmFontProperty("", font).weight == "normal" - - -@pytest.mark.parametrize( - "afm_data", - [ - b"""nope -really nope""", - b"""StartFontMetrics 2.0 -Comment Comments are ignored. -Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017 -FontName MyFont-Bold -EncodingScheme FontSpecific""", - ], -) -def test_bad_afm(afm_data): - fh = BytesIO(afm_data) - with pytest.raises(RuntimeError): - _afm._parse_header(fh) - - -@pytest.mark.parametrize( - "afm_data", - [ - b"""StartFontMetrics 2.0 -Comment Comments are ignored. -Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017 -Aardvark bob -FontName MyFont-Bold -EncodingScheme FontSpecific -StartCharMetrics 3""", - b"""StartFontMetrics 2.0 -Comment Comments are ignored. -Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017 -ItalicAngle zero degrees -FontName MyFont-Bold -EncodingScheme FontSpecific -StartCharMetrics 3""", - ], -) -def test_malformed_header(afm_data, caplog): - fh = BytesIO(afm_data) - with caplog.at_level(logging.ERROR): - _afm._parse_header(fh) - - assert len(caplog.records) == 1 diff --git a/spaces/laocao1798/laocaoAI/Dockerfile b/spaces/laocao1798/laocaoAI/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/laocao1798/laocaoAI/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
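- # Added note: multi-stage build — only the compiled go-proxy-bingai binary is copied out of
- # the builder stage, so the runtime image stays a small Alpine layer without the Go toolchain.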
- -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/lc202301/ChuanhuChatGPT/overwrites.py b/spaces/lc202301/ChuanhuChatGPT/overwrites.py deleted file mode 100644 index a87499a81bb3c23bf34c1faadcc02085567cd447..0000000000000000000000000000000000000000 --- a/spaces/lc202301/ChuanhuChatGPT/overwrites.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from presets import * -from llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. - """ - if y is None or y == []: - return [] - tag_regex = re.compile(r"^<\w+>[^<]+") - if tag_regex.search(y[-1][1]): - y[-1] = (convert_user(y[-1][0]), y[-1][1]) - else: - y[-1] = (convert_user(y[-1][0]), convert_mdtext(y[-1][1])) - return y - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/lewiswu1209/MockingBird/synthesizer/utils/_cmudict.py b/spaces/lewiswu1209/MockingBird/synthesizer/utils/_cmudict.py deleted file mode 100644 index 2cef1f896d4fb78478884fe8e810956998d5e3b3..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/synthesizer/utils/_cmudict.py +++ /dev/null @@ -1,62 +0,0 @@ -import re - -valid_symbols = [ - "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", - "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", - "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", - "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", - "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", - "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", - "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH" -] - -_valid_symbol_set = set(valid_symbols) - - -class CMUDict: - """Thin wrapper around CMUDict data. 
http://www.speech.cs.cmu.edu/cgi-bin/cmudict""" - def __init__(self, file_or_path, keep_ambiguous=True): - if isinstance(file_or_path, str): - with open(file_or_path, encoding="latin-1") as f: - entries = _parse_cmudict(f) - else: - entries = _parse_cmudict(file_or_path) - if not keep_ambiguous: - entries = {word: pron for word, pron in entries.items() if len(pron) == 1} - self._entries = entries - - - def __len__(self): - return len(self._entries) - - - def lookup(self, word): - """Returns list of ARPAbet pronunciations of the given word.""" - return self._entries.get(word.upper()) - - - -_alt_re = re.compile(r"\([0-9]+\)") - - -def _parse_cmudict(file): - cmudict = {} - for line in file: - if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"): - parts = line.split(" ") - word = re.sub(_alt_re, "", parts[0]) - pronunciation = _get_pronunciation(parts[1]) - if pronunciation: - if word in cmudict: - cmudict[word].append(pronunciation) - else: - cmudict[word] = [pronunciation] - return cmudict - - -def _get_pronunciation(s): - parts = s.strip().split(" ") - for part in parts: - if part not in _valid_symbol_set: - return None - return " ".join(parts) diff --git a/spaces/limingcv/AlignDet/finetune/finetune_mask-rcnn_2x_coco_lr2e-2_wd1e-5/mask_rcnn_r50_fpn_2x_coco.py b/spaces/limingcv/AlignDet/finetune/finetune_mask-rcnn_2x_coco_lr2e-2_wd1e-5/mask_rcnn_r50_fpn_2x_coco.py deleted file mode 100644 index 5a1ab387081a155744bdfa5b66bc4efff25f647b..0000000000000000000000000000000000000000 --- a/spaces/limingcv/AlignDet/finetune/finetune_mask-rcnn_2x_coco_lr2e-2_wd1e-5/mask_rcnn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,259 +0,0 @@ -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5, - norm_cfg=dict(type='SyncBN', requires_grad=True)), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', 
use_mask=True, loss_weight=1.0))), - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) - ]), - val=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ]), - test=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - 
dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -evaluation = dict(metric=['bbox', 'segm'], save_best='auto') -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=1e-05) -optimizer_config = dict(grad_clip=None) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict( - type='MMDetWandbHook', - init_kwargs=dict(project='I2B', group='finetune'), - interval=50, - num_eval_images=0, - log_checkpoint=False) -] -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = 'work_dirs/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth' -resume_from = None -workflow = [('train', 1)] -opencv_num_threads = 0 -mp_start_method = 'fork' -auto_scale_lr = dict(enable=False, base_batch_size=16) -custom_imports = None -norm_cfg = dict(type='SyncBN', requires_grad=True) -work_dir = 'work_dirs/finetune_mask-rcnn_2x_coco_lr2e-2_wd1e-5' -auto_resume = True -gpu_ids = range(0, 8) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Dude Simulator 2 Download] [Xforce Keygen].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Dude Simulator 2 Download] [Xforce Keygen].md deleted file mode 100644 index 619f9ea68b5e2bcb6523c05a2548e8a6f4fe40ac..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Dude Simulator 2 Download] [Xforce Keygen].md +++ /dev/null @@ -1,26 +0,0 @@ -

      Dude Simulator 2 Download] [Xforce keygen]


      DOWNLOAD ……… https://bytlly.com/2uGx57



      -
      -To me, Life is the best adventure. And also a relaxing one. - -Logged - -Play me. I want to fly, I want to shine. I want to be a problem in your city. I want to be an attraction on your TV. Play me, play me, play me... - -This is the game's story mode. It is an excellent game. There is a lot of fun to be had here. - -You start as a rookie reporter with a big city paper. Here you explore the world around you. This is a survival game. You'll need to eat, drink, sleep, find shelter, and have sex. (Male or Female. Also Robots.) These are the things you need to survive. You may like some or you may not. Its up to you. You are the main character in the game. - -This is the way to do a sandbox game. There is a lot of stuff to do in this game. It is an open world game. You are the director of the game. You can decide what you want to do. You can walk around, or you can hop in a car and drive somewhere. I prefer walking though. - -This is the story mode of the game. You start as a rookie reporter with a big city paper. Here you explore the world around you. This is a survival game. You'll need to eat, drink, sleep, find shelter, and have sex. (Male or Female. Also Robots.) These are the things you need to survive. You may like some or you may not. Its up to you. You are the main character in the game. - -I liked the story mode more than the free roam mode. Free roam mode is a bit hard to figure out at first. Free roam mode is very relaxing, though. I recommend trying it out. - -Also, I recommend trying the combat mode. I've only been playing this game for a week, but I am having a lot of fun with it. - -And lastly, I recommend getting the Combat Challenge mode to a level where you have no trouble surviving. - -Nice job on the review 4fefd39f24
      -
      -
      -

      diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/IATKOS ML2 (Mac OS X Mountain Lion 10.8.2).torrent [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/IATKOS ML2 (Mac OS X Mountain Lion 10.8.2).torrent [BETTER].md deleted file mode 100644 index 2b4b9d12a275426519b2f8d99702677b0feee0bd..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/IATKOS ML2 (Mac OS X Mountain Lion 10.8.2).torrent [BETTER].md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      this is the iatkos ml2 (mac os x mountain lion 10.8.2).torrent crack file. the. dmg file is also posted here, and you can use that to get the installer. this is the full iatkos ml2 (mac os x mountain lion 10.torrent file, un-cracked. the main file contains a xar file which contains. iatkos ml2 (mac os x mountain lion 10.torrent is a package which contains softmod iatkos ml2 (mac os x mountain lion 10.torrent crack released by us. the purpose of this package is to connect the cracker team to the softmod iatkos ml2 (mac os x mountain lion 10.torrent crack without the crack team being public. this means you can keep in contact with team members via the member list but without giving away your identity.

      -

      to re-install mountain lion on a mac you have to remove all the apps you had installed on lion before you installed mountain lion. you only need to do that if you didnt buy mountain lion from the mac app store.

      -

      iATKOS ML2 (Mac OS X Mountain Lion 10.8.2).torrent


      Download File > https://bytlly.com/2uGwHN



      -

- you can use a mac app store purchase to install mountain lion. - you can also reinstall os x lion on a mac that doesn't come with lion. - you can't downgrade to lion on a mac that comes with mountain lion.

      -

      these days, most macs, including apple mac mini computers and macbook laptops, can run both lion and mountain lion. there are just a few macs, including some previous-generation mac pro desktop computers, that cannot run lion or mountain lion. for more information on which mac models are able to run lion and mountain lion, click the links below.

      -

      files needed:
      iatkos s3 ver2 10.6.3 iso image (search torrents) and burn on dvd
      os x 10.7 combo update
      computer specifications:
      mobo: gigabyte ga p35-ds3p
      mobo integrated sound: realtek alc889a (works with voodoo hda )
      mobo integrated network: realtek 8111b chip (works oob)
      cpu: pentium d 930sl88s 3ghz (works with 32 bit qoopz kernel)
      graphic card: nvidia 7200gs 256mb (works after editing com.apple.boot.plist file, 32 bit only, not supported by mountain lion 10.8 which is 64bit only! )
      hdd: seagate 80gb sata
      dvdrw: sony optiarc ad-7260s, 24x, sata, dual layer
      ram: 2 x 1gb kingston
      usb mouse
      usb keyboard
monitor should be connected via dvi, not d-sub, must use sata hdd and sata dvd (not ata), mouse and keyboard should be connected via usb

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/KMS Matrix 2.0 Windows Activators.md b/spaces/lincquiQcaudo/Top-20-Diffusion/KMS Matrix 2.0 Windows Activators.md deleted file mode 100644 index 6533d104bcaa32b9ba4234dc6cd7a8bf42067c3a..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/KMS Matrix 2.0 Windows Activators.md +++ /dev/null @@ -1,37 +0,0 @@ - -

      How to Activate Windows with KMS Matrix 2.0

      -

      KMS Matrix 2.0 is a powerful tool that can activate Windows 7/8/8.1/10/11 and Office 2010/2013/2016/2019/2021 without buying a license key. It is based on the KMS (Key Management Service) technology, which is used by Microsoft to activate large-scale enterprise and educational customers. KMS Matrix 2.0 can emulate a KMS server on your local network and activate your Windows and Office products with a generic volume license key (GVLK).

      -

      KMS Matrix 2.0 Windows Activators


      Download Ziphttps://bytlly.com/2uGyhb



      -

      In this article, we will show you how to use KMS Matrix 2.0 to activate your Windows and Office applications in a few simple steps.

      -

      Step 1: Download KMS Matrix 2.0

      -

      You can download KMS Matrix 2.0 from the official website or from any trusted source. The file size is about 4 MB and it is a portable application, which means you don't need to install it on your computer. Just extract the zip file and run the executable file as administrator.

      -

      Step 2: Run KMS Matrix 2.0

      -

      When you run KMS Matrix 2.0, you will see a simple interface with four buttons: Activate Windows, Activate Office, Settings, and About. You can also see the status of your Windows and Office activation at the bottom of the window.

      -

      To activate Windows, click on the Activate Windows button and wait for a few seconds. You will see a message saying "Windows activation successful" if everything goes well.

      -

      To activate Office, click on the Activate Office button and wait for a few seconds. You will see a message saying "Office activation successful" if everything goes well.

      -

      Step 3: Check Activation Status

      -

      To verify that your Windows and Office are activated, you can use the built-in tools or commands.

      -

      -

      For Windows, you can go to Settings > Update & Security > Activation and see if it says "Windows is activated". You can also open a command prompt and type slmgr /xpr to see the expiration date of your activation.

      -

      For Office, you can open any Office application and go to File > Account and see if it says "Product Activated". You can also open a command prompt and type cscript ospp.vbs /dstatus to see the license status of your Office products.

      -

      Conclusion

      -

      KMS Matrix 2.0 is a simple and effective way to activate Windows and Office without paying for a license key. It works for most versions and editions of Windows and Office and it does not require any user intervention or internet connection. However, it is not a permanent activation solution and it may be detected by antivirus software as malware. Therefore, use it at your own risk and discretion.

      - -

      FAQs

      -

      In this section, we will answer some frequently asked questions about KMS Matrix 2.0 and its activation process.

      -

      What is KMS?

      -

      KMS stands for Key Management Service, which is a technology that allows organizations to activate multiple computers with a single product key. KMS requires a KMS host computer that runs a KMS service and a KMS client computer that connects to the KMS host for activation. The KMS host can be a Windows server or a Windows client that has been configured as a KMS host. The KMS client can be any Windows or Office product that supports KMS activation.

      -

      What is GVLK?

      -

      GVLK stands for Generic Volume License Key, which is a special type of product key that is used for KMS activation. GVLKs are pre-installed on Windows and Office products that are intended for volume licensing customers. GVLKs allow the products to automatically discover and connect to the KMS host on the local network for activation. GVLKs are also known as KMS client keys or default keys.

      What is KMS Matrix 2.0?

      KMS Matrix 2.0 is a tool that can emulate a KMS server on your local network and activate your Windows and Office products with GVLKs. It does not require any installation or configuration and it can activate most versions and editions of Windows and Office. It is based on the open source project KMSEmulator by mikmik38.

      Is KMS Matrix 2.0 legal?

      KMS Matrix 2.0 is not an official Microsoft product and has no license or authorization from Microsoft. It is considered a hacking tool or crack that bypasses the genuine activation process of Windows and Office. Therefore, using KMS Matrix 2.0 may violate Microsoft's terms and conditions and may result in legal consequences. We do not recommend using KMS Matrix 2.0 for any commercial or personal purposes.

      Is KMS Matrix 2.0 safe?

      KMS Matrix 2.0 is not a virus or malware, but it may be detected by some antivirus software as such because of its nature and behavior. It may also trigger some security warnings or alerts from Windows Defender or SmartScreen. Therefore, you may need to disable or whitelist your antivirus software before using KMS Matrix 2.0. However, you should always download KMS Matrix 2.0 from a trusted source and scan it with a reliable antivirus software before running it.

      How long does the activation last?

      The activation by KMS Matrix 2.0 lasts for 180 days, after which it needs to be renewed by running the tool again. However, the tool can automatically renew the activation every 10 days if you keep it running in the background. You can also check the remaining days of your activation by using the commands slmgr /xpr for Windows and cscript ospp.vbs /dstatus for Office.

      Can I update Windows and Office after activation?

      Yes, you can update your Windows and Office products normally after activation by KMS Matrix 2.0. The updates will not affect your activation status or cause any problems. However, you should avoid installing any updates that may change the product key or license information of your products.

      \ No newline at end of file diff --git a/spaces/lindeberg/whisper-webui/tests/segments_test.py b/spaces/lindeberg/whisper-webui/tests/segments_test.py deleted file mode 100644 index d829f1c77f74b3c96513fe4965d532cf2d1dceb4..0000000000000000000000000000000000000000 --- a/spaces/lindeberg/whisper-webui/tests/segments_test.py +++ /dev/null @@ -1,48 +0,0 @@ -import sys -import unittest - -sys.path.append('../whisper-webui') - -from src.segments import merge_timestamps - -class TestSegments(unittest.TestCase): - def __init__(self, *args, **kwargs): - super(TestSegments, self).__init__(*args, **kwargs) - - def test_merge_segments(self): - segments = [ - {'start': 10.0, 'end': 20.0}, - {'start': 22.0, 'end': 27.0}, - {'start': 31.0, 'end': 35.0}, - {'start': 45.0, 'end': 60.0}, - {'start': 61.0, 'end': 65.0}, - {'start': 68.0, 'end': 98.0}, - {'start': 100.0, 'end': 102.0}, - {'start': 110.0, 'end': 112.0} - ] - - result = merge_timestamps(segments, merge_window=5, max_merge_size=30, padding_left=1, padding_right=1) - - self.assertListEqual(result, [ - {'start': 9.0, 'end': 36.0}, - {'start': 44.0, 'end': 66.0}, - {'start': 67.0, 'end': 99.0}, - {'start': 99.0, 'end': 103.0}, - {'start': 109.0, 'end': 113.0} - ]) - - def test_overlap_next(self): - segments = [ - {'start': 5.0, 'end': 39.182}, - {'start': 39.986, 'end': 40.814} - ] - - result = merge_timestamps(segments, merge_window=5, max_merge_size=30, padding_left=1, padding_right=1) - - self.assertListEqual(result, [ - {'start': 4.0, 'end': 39.584}, - {'start': 39.584, 'end': 41.814} - ]) - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py b/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py deleted file mode 100644 index 2d22a59eec79a2a19b83fa1779f2adaf5753aec6..0000000000000000000000000000000000000000 --- a/spaces/linfanluntan/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# -*- coding: utf-8 -*- -# @Author: Yihao Chen -# @Date: 2021-08-16 16:03:17 -# @Last Modified by: Shilong Liu -# @Last Modified time: 2022-01-23 15:26 -# modified from mmcv - -import inspect -from functools import partial - - -class Registry(object): - def __init__(self, name): - self._name = name - self._module_dict = dict() - - def __repr__(self): - format_str = self.__class__.__name__ + "(name={}, items={})".format( - self._name, list(self._module_dict.keys()) - ) - return format_str - - def __len__(self): - return len(self._module_dict) - - @property - def name(self): - return self._name - - @property - def module_dict(self): - return self._module_dict - - def get(self, key): - return self._module_dict.get(key, None) - - def registe_with_name(self, module_name=None, force=False): - return partial(self.register, module_name=module_name, force=force) - - def register(self, module_build_function, module_name=None, force=False): - """Register a module build function. - Args: - module (:obj:`nn.Module`): Module to be registered. 
- """ - if not inspect.isfunction(module_build_function): - raise TypeError( - "module_build_function must be a function, but got {}".format( - type(module_build_function) - ) - ) - if module_name is None: - module_name = module_build_function.__name__ - if not force and module_name in self._module_dict: - raise KeyError("{} is already registered in {}".format(module_name, self.name)) - self._module_dict[module_name] = module_build_function - - return module_build_function - - -MODULE_BUILD_FUNCS = Registry("model build functions") diff --git a/spaces/linweiyt/aiwrite/README.md b/spaces/linweiyt/aiwrite/README.md deleted file mode 100644 index 1c39c2d1562b6107c0cae3e6fadcc2de95a71649..0000000000000000000000000000000000000000 --- a/spaces/linweiyt/aiwrite/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Aiwrite -emoji: ⚡ -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ljjggr/bingo/src/pages/api/blob.ts b/spaces/ljjggr/bingo/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py b/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py deleted file mode 100644 index 87d741390b3c12e174ae5ddbe71436a5a8ab8718..0000000000000000000000000000000000000000 --- a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py +++ /dev/null @@ -1,231 +0,0 @@ -import cv2 -import gradio as gr -import numpy as np -import torch -from diffusers import ControlNetModel -from PIL import Image - -from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import ( - StableDiffusionControlNetInpaintPipeline, -) -from diffusion_webui.utils.model_list import ( - controlnet_canny_model_list, - stable_inpiant_model_list, -) -from diffusion_webui.utils.scheduler_list import ( - SCHEDULER_LIST, - get_scheduler_list, -) - -# https://github.com/mikonvergence/ControlNetInpaint - - -class StableDiffusionControlNetInpaintCannyGenerator: - def 
__init__(self): - self.pipe = None - - def load_model(self, stable_model_path, controlnet_model_path, scheduler): - if self.pipe is None: - controlnet = ControlNetModel.from_pretrained( - controlnet_model_path, torch_dtype=torch.float16 - ) - self.pipe = ( - StableDiffusionControlNetInpaintPipeline.from_pretrained( - pretrained_model_name_or_path=stable_model_path, - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, - ) - ) - - self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler) - self.pipe.to("cuda") - self.pipe.enable_xformers_memory_efficient_attention() - - return self.pipe - - def load_image(self, image_path): - image = np.array(image_path) - image = Image.fromarray(image) - return image - - def controlnet_canny_inpaint( - self, - image_path: str, - ): - image = image_path["image"].convert("RGB").resize((512, 512)) - image = np.array(image) - - image = cv2.Canny(image, 100, 200) - image = image[:, :, None] - image = np.concatenate([image, image, image], axis=2) - image = Image.fromarray(image) - - return image - - def generate_image( - self, - image_path: str, - stable_model_path: str, - controlnet_model_path: str, - prompt: str, - negative_prompt: str, - num_images_per_prompt: int, - guidance_scale: int, - num_inference_step: int, - controlnet_conditioning_scale: int, - scheduler: str, - seed_generator: int, - ): - - normal_image = image_path["image"].convert("RGB").resize((512, 512)) - mask_image = image_path["mask"].convert("RGB").resize((512, 512)) - - normal_image = self.load_image(image_path=normal_image) - mask_image = self.load_image(image_path=mask_image) - - control_image = self.controlnet_canny_inpaint(image_path=image_path) - pipe = self.load_model( - stable_model_path=stable_model_path, - controlnet_model_path=controlnet_model_path, - scheduler=scheduler, - ) - - if seed_generator == 0: - random_seed = torch.randint(0, 1000000, (1,)) - generator = torch.manual_seed(random_seed) - else: - generator = torch.manual_seed(seed_generator) - - output = pipe( - prompt=prompt, - image=normal_image, - mask_image=mask_image, - control_image=control_image, - negative_prompt=negative_prompt, - num_images_per_prompt=num_images_per_prompt, - num_inference_steps=num_inference_step, - guidance_scale=guidance_scale, - controlnet_conditioning_scale=controlnet_conditioning_scale, - generator=generator, - ).images - - return output - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - controlnet_canny_inpaint_image_file = gr.Image( - source="upload", - tool="sketch", - elem_id="image_upload", - type="pil", - label="Upload", - ) - - controlnet_canny_inpaint_prompt = gr.Textbox( - lines=1, placeholder="Prompt", show_label=False - ) - - controlnet_canny_inpaint_negative_prompt = gr.Textbox( - lines=1, - show_label=False, - placeholder="Negative Prompt", - ) - with gr.Row(): - with gr.Column(): - controlnet_canny_inpaint_stable_model_id = ( - gr.Dropdown( - choices=stable_inpiant_model_list, - value=stable_inpiant_model_list[0], - label="Stable Model Id", - ) - ) - - controlnet_canny_inpaint_guidance_scale = gr.Slider( - minimum=0.1, - maximum=15, - step=0.1, - value=7.5, - label="Guidance Scale", - ) - - controlnet_canny_inpaint_num_inference_step = ( - gr.Slider( - minimum=1, - maximum=100, - step=1, - value=50, - label="Num Inference Step", - ) - ) - controlnet_canny_inpaint_num_images_per_prompt = ( - gr.Slider( - minimum=1, - maximum=10, - step=1, - value=1, - label="Number Of Images", - ) - ) - with gr.Row(): - with gr.Column(): - 
controlnet_canny_inpaint_model_id = gr.Dropdown( - choices=controlnet_canny_model_list, - value=controlnet_canny_model_list[0], - label="Controlnet Model Id", - ) - controlnet_canny_inpaint_scheduler = ( - gr.Dropdown( - choices=SCHEDULER_LIST, - value=SCHEDULER_LIST[0], - label="Scheduler", - ) - ) - controlnet_canny_inpaint_controlnet_conditioning_scale = gr.Slider( - minimum=0.1, - maximum=1.0, - step=0.1, - value=0.5, - label="Controlnet Conditioning Scale", - ) - - controlnet_canny_inpaint_seed_generator = ( - gr.Slider( - minimum=0, - maximum=1000000, - step=1, - value=0, - label="Seed Generator", - ) - ) - - controlnet_canny_inpaint_predict = gr.Button( - value="Generator" - ) - - with gr.Column(): - output_image = gr.Gallery( - label="Generated images", - show_label=False, - elem_id="gallery", - ).style(grid=(1, 2)) - - controlnet_canny_inpaint_predict.click( - fn=StableDiffusionControlNetInpaintCannyGenerator().generate_image, - inputs=[ - controlnet_canny_inpaint_image_file, - controlnet_canny_inpaint_stable_model_id, - controlnet_canny_inpaint_model_id, - controlnet_canny_inpaint_prompt, - controlnet_canny_inpaint_negative_prompt, - controlnet_canny_inpaint_num_images_per_prompt, - controlnet_canny_inpaint_guidance_scale, - controlnet_canny_inpaint_num_inference_step, - controlnet_canny_inpaint_controlnet_conditioning_scale, - controlnet_canny_inpaint_scheduler, - controlnet_canny_inpaint_seed_generator, - ], - outputs=[output_image], - ) diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_dtypes.cpp b/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_dtypes.cpp deleted file mode 100644 index 467e0253f7eb422da4fff3b4db7e4836fc2c11f2..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_numpy_dtypes.cpp +++ /dev/null @@ -1,474 +0,0 @@ -/* - tests/test_numpy_dtypes.cpp -- Structured and compound NumPy dtypes - - Copyright (c) 2016 Ivan Smirnov - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#include "pybind11_tests.h" -#include - -#ifdef __GNUC__ -#define PYBIND11_PACKED(cls) cls __attribute__((__packed__)) -#else -#define PYBIND11_PACKED(cls) __pragma(pack(push, 1)) cls __pragma(pack(pop)) -#endif - -namespace py = pybind11; - -struct SimpleStruct { - bool bool_; - uint32_t uint_; - float float_; - long double ldbl_; -}; - -std::ostream& operator<<(std::ostream& os, const SimpleStruct& v) { - return os << "s:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_; -} - -struct SimpleStructReordered { - bool bool_; - float float_; - uint32_t uint_; - long double ldbl_; -}; - -PYBIND11_PACKED(struct PackedStruct { - bool bool_; - uint32_t uint_; - float float_; - long double ldbl_; -}); - -std::ostream& operator<<(std::ostream& os, const PackedStruct& v) { - return os << "p:" << v.bool_ << "," << v.uint_ << "," << v.float_ << "," << v.ldbl_; -} - -PYBIND11_PACKED(struct NestedStruct { - SimpleStruct a; - PackedStruct b; -}); - -std::ostream& operator<<(std::ostream& os, const NestedStruct& v) { - return os << "n:a=" << v.a << ";b=" << v.b; -} - -struct PartialStruct { - bool bool_; - uint32_t uint_; - float float_; - uint64_t dummy2; - long double ldbl_; -}; - -struct PartialNestedStruct { - uint64_t dummy1; - PartialStruct a; - uint64_t dummy2; -}; - -struct UnboundStruct { }; - -struct StringStruct { - char a[3]; - std::array b; -}; - -struct ComplexStruct { - std::complex cflt; - std::complex cdbl; -}; - -std::ostream& operator<<(std::ostream& os, const ComplexStruct& v) { - return os << "c:" << v.cflt << "," << v.cdbl; -} - -struct ArrayStruct { - char a[3][4]; - int32_t b[2]; - std::array c; - std::array d[4]; -}; - -PYBIND11_PACKED(struct StructWithUglyNames { - int8_t __x__; - uint64_t __y__; -}); - -enum class E1 : int64_t { A = -1, B = 1 }; -enum E2 : uint8_t { X = 1, Y = 2 }; - -PYBIND11_PACKED(struct EnumStruct { - E1 e1; - E2 e2; -}); - -std::ostream& operator<<(std::ostream& os, const StringStruct& v) { - os << "a='"; - for (size_t i = 0; i < 3 && v.a[i]; i++) os << v.a[i]; - os << "',b='"; - for (size_t i = 0; i < 3 && v.b[i]; i++) os << v.b[i]; - return os << "'"; -} - -std::ostream& operator<<(std::ostream& os, const ArrayStruct& v) { - os << "a={"; - for (int i = 0; i < 3; i++) { - if (i > 0) - os << ','; - os << '{'; - for (int j = 0; j < 3; j++) - os << v.a[i][j] << ','; - os << v.a[i][3] << '}'; - } - os << "},b={" << v.b[0] << ',' << v.b[1]; - os << "},c={" << int(v.c[0]) << ',' << int(v.c[1]) << ',' << int(v.c[2]); - os << "},d={"; - for (int i = 0; i < 4; i++) { - if (i > 0) - os << ','; - os << '{' << v.d[i][0] << ',' << v.d[i][1] << '}'; - } - return os << '}'; -} - -std::ostream& operator<<(std::ostream& os, const EnumStruct& v) { - return os << "e1=" << (v.e1 == E1::A ? "A" : "B") << ",e2=" << (v.e2 == E2::X ? 
"X" : "Y"); -} - -template -py::array mkarray_via_buffer(size_t n) { - return py::array(py::buffer_info(nullptr, sizeof(T), - py::format_descriptor::format(), - 1, { n }, { sizeof(T) })); -} - -#define SET_TEST_VALS(s, i) do { \ - s.bool_ = (i) % 2 != 0; \ - s.uint_ = (uint32_t) (i); \ - s.float_ = (float) (i) * 1.5f; \ - s.ldbl_ = (long double) (i) * -2.5L; } while (0) - -template -py::array_t create_recarray(size_t n) { - auto arr = mkarray_via_buffer(n); - auto req = arr.request(); - auto ptr = static_cast(req.ptr); - for (size_t i = 0; i < n; i++) { - SET_TEST_VALS(ptr[i], i); - } - return arr; -} - -template -py::list print_recarray(py::array_t arr) { - const auto req = arr.request(); - const auto ptr = static_cast(req.ptr); - auto l = py::list(); - for (ssize_t i = 0; i < req.size; i++) { - std::stringstream ss; - ss << ptr[i]; - l.append(py::str(ss.str())); - } - return l; -} - -py::array_t test_array_ctors(int i) { - using arr_t = py::array_t; - - std::vector data { 1, 2, 3, 4, 5, 6 }; - std::vector shape { 3, 2 }; - std::vector strides { 8, 4 }; - - auto ptr = data.data(); - auto vptr = (void *) ptr; - auto dtype = py::dtype("int32"); - - py::buffer_info buf_ndim1(vptr, 4, "i", 6); - py::buffer_info buf_ndim1_null(nullptr, 4, "i", 6); - py::buffer_info buf_ndim2(vptr, 4, "i", 2, shape, strides); - py::buffer_info buf_ndim2_null(nullptr, 4, "i", 2, shape, strides); - - auto fill = [](py::array arr) { - auto req = arr.request(); - for (int i = 0; i < 6; i++) ((int32_t *) req.ptr)[i] = i + 1; - return arr; - }; - - switch (i) { - // shape: (3, 2) - case 10: return arr_t(shape, strides, ptr); - case 11: return py::array(shape, strides, ptr); - case 12: return py::array(dtype, shape, strides, vptr); - case 13: return arr_t(shape, ptr); - case 14: return py::array(shape, ptr); - case 15: return py::array(dtype, shape, vptr); - case 16: return arr_t(buf_ndim2); - case 17: return py::array(buf_ndim2); - // shape: (3, 2) - post-fill - case 20: return fill(arr_t(shape, strides)); - case 21: return py::array(shape, strides, ptr); // can't have nullptr due to templated ctor - case 22: return fill(py::array(dtype, shape, strides)); - case 23: return fill(arr_t(shape)); - case 24: return py::array(shape, ptr); // can't have nullptr due to templated ctor - case 25: return fill(py::array(dtype, shape)); - case 26: return fill(arr_t(buf_ndim2_null)); - case 27: return fill(py::array(buf_ndim2_null)); - // shape: (6, ) - case 30: return arr_t(6, ptr); - case 31: return py::array(6, ptr); - case 32: return py::array(dtype, 6, vptr); - case 33: return arr_t(buf_ndim1); - case 34: return py::array(buf_ndim1); - // shape: (6, ) - case 40: return fill(arr_t(6)); - case 41: return py::array(6, ptr); // can't have nullptr due to templated ctor - case 42: return fill(py::array(dtype, 6)); - case 43: return fill(arr_t(buf_ndim1_null)); - case 44: return fill(py::array(buf_ndim1_null)); - } - return arr_t(); -} - -py::list test_dtype_ctors() { - py::list list; - list.append(py::dtype("int32")); - list.append(py::dtype(std::string("float64"))); - list.append(py::dtype::from_args(py::str("bool"))); - py::list names, offsets, formats; - py::dict dict; - names.append(py::str("a")); names.append(py::str("b")); dict["names"] = names; - offsets.append(py::int_(1)); offsets.append(py::int_(10)); dict["offsets"] = offsets; - formats.append(py::dtype("int32")); formats.append(py::dtype("float64")); dict["formats"] = formats; - dict["itemsize"] = py::int_(20); - list.append(py::dtype::from_args(dict)); - 
list.append(py::dtype(names, formats, offsets, 20)); - list.append(py::dtype(py::buffer_info((void *) 0, sizeof(unsigned int), "I", 1))); - list.append(py::dtype(py::buffer_info((void *) 0, 0, "T{i:a:f:b:}", 1))); - return list; -} - -struct A {}; -struct B {}; - -TEST_SUBMODULE(numpy_dtypes, m) { - try { py::module::import("numpy"); } - catch (...) { return; } - - // typeinfo may be registered before the dtype descriptor for scalar casts to work... - py::class_(m, "SimpleStruct"); - - PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); - PYBIND11_NUMPY_DTYPE(SimpleStructReordered, bool_, uint_, float_, ldbl_); - PYBIND11_NUMPY_DTYPE(PackedStruct, bool_, uint_, float_, ldbl_); - PYBIND11_NUMPY_DTYPE(NestedStruct, a, b); - PYBIND11_NUMPY_DTYPE(PartialStruct, bool_, uint_, float_, ldbl_); - PYBIND11_NUMPY_DTYPE(PartialNestedStruct, a); - PYBIND11_NUMPY_DTYPE(StringStruct, a, b); - PYBIND11_NUMPY_DTYPE(ArrayStruct, a, b, c, d); - PYBIND11_NUMPY_DTYPE(EnumStruct, e1, e2); - PYBIND11_NUMPY_DTYPE(ComplexStruct, cflt, cdbl); - - // ... or after - py::class_(m, "PackedStruct"); - - PYBIND11_NUMPY_DTYPE_EX(StructWithUglyNames, __x__, "x", __y__, "y"); - - // If uncommented, this should produce a static_assert failure telling the user that the struct - // is not a POD type -// struct NotPOD { std::string v; NotPOD() : v("hi") {}; }; -// PYBIND11_NUMPY_DTYPE(NotPOD, v); - - // Check that dtypes can be registered programmatically, both from - // initializer lists of field descriptors and from other containers. - py::detail::npy_format_descriptor::register_dtype( - {} - ); - py::detail::npy_format_descriptor::register_dtype( - std::vector{} - ); - - // test_recarray, test_scalar_conversion - m.def("create_rec_simple", &create_recarray); - m.def("create_rec_packed", &create_recarray); - m.def("create_rec_nested", [](size_t n) { // test_signature - py::array_t arr = mkarray_via_buffer(n); - auto req = arr.request(); - auto ptr = static_cast(req.ptr); - for (size_t i = 0; i < n; i++) { - SET_TEST_VALS(ptr[i].a, i); - SET_TEST_VALS(ptr[i].b, i + 1); - } - return arr; - }); - m.def("create_rec_partial", &create_recarray); - m.def("create_rec_partial_nested", [](size_t n) { - py::array_t arr = mkarray_via_buffer(n); - auto req = arr.request(); - auto ptr = static_cast(req.ptr); - for (size_t i = 0; i < n; i++) { - SET_TEST_VALS(ptr[i].a, i); - } - return arr; - }); - m.def("print_rec_simple", &print_recarray); - m.def("print_rec_packed", &print_recarray); - m.def("print_rec_nested", &print_recarray); - - // test_format_descriptors - m.def("get_format_unbound", []() { return py::format_descriptor::format(); }); - m.def("print_format_descriptors", []() { - py::list l; - for (const auto &fmt : { - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format(), - py::format_descriptor::format() - }) { - l.append(py::cast(fmt)); - } - return l; - }); - - // test_dtype - m.def("print_dtypes", []() { - py::list l; - for (const py::handle &d : { - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of(), - py::dtype::of() - }) - l.append(py::str(d)); - return l; - }); - m.def("test_dtype_ctors", &test_dtype_ctors); - m.def("test_dtype_methods", []() { - py::list list; - auto dt1 = 
py::dtype::of(); - auto dt2 = py::dtype::of(); - list.append(dt1); list.append(dt2); - list.append(py::bool_(dt1.has_fields())); list.append(py::bool_(dt2.has_fields())); - list.append(py::int_(dt1.itemsize())); list.append(py::int_(dt2.itemsize())); - return list; - }); - struct TrailingPaddingStruct { - int32_t a; - char b; - }; - PYBIND11_NUMPY_DTYPE(TrailingPaddingStruct, a, b); - m.def("trailing_padding_dtype", []() { return py::dtype::of(); }); - - // test_string_array - m.def("create_string_array", [](bool non_empty) { - py::array_t arr = mkarray_via_buffer(non_empty ? 4 : 0); - if (non_empty) { - auto req = arr.request(); - auto ptr = static_cast(req.ptr); - for (ssize_t i = 0; i < req.size * req.itemsize; i++) - static_cast(req.ptr)[i] = 0; - ptr[1].a[0] = 'a'; ptr[1].b[0] = 'a'; - ptr[2].a[0] = 'a'; ptr[2].b[0] = 'a'; - ptr[3].a[0] = 'a'; ptr[3].b[0] = 'a'; - - ptr[2].a[1] = 'b'; ptr[2].b[1] = 'b'; - ptr[3].a[1] = 'b'; ptr[3].b[1] = 'b'; - - ptr[3].a[2] = 'c'; ptr[3].b[2] = 'c'; - } - return arr; - }); - m.def("print_string_array", &print_recarray); - - // test_array_array - m.def("create_array_array", [](size_t n) { - py::array_t arr = mkarray_via_buffer(n); - auto ptr = (ArrayStruct *) arr.mutable_data(); - for (size_t i = 0; i < n; i++) { - for (size_t j = 0; j < 3; j++) - for (size_t k = 0; k < 4; k++) - ptr[i].a[j][k] = char('A' + (i * 100 + j * 10 + k) % 26); - for (size_t j = 0; j < 2; j++) - ptr[i].b[j] = int32_t(i * 1000 + j); - for (size_t j = 0; j < 3; j++) - ptr[i].c[j] = uint8_t(i * 10 + j); - for (size_t j = 0; j < 4; j++) - for (size_t k = 0; k < 2; k++) - ptr[i].d[j][k] = float(i) * 100.0f + float(j) * 10.0f + float(k); - } - return arr; - }); - m.def("print_array_array", &print_recarray); - - // test_enum_array - m.def("create_enum_array", [](size_t n) { - py::array_t arr = mkarray_via_buffer(n); - auto ptr = (EnumStruct *) arr.mutable_data(); - for (size_t i = 0; i < n; i++) { - ptr[i].e1 = static_cast(-1 + ((int) i % 2) * 2); - ptr[i].e2 = static_cast(1 + (i % 2)); - } - return arr; - }); - m.def("print_enum_array", &print_recarray); - - // test_complex_array - m.def("create_complex_array", [](size_t n) { - py::array_t arr = mkarray_via_buffer(n); - auto ptr = (ComplexStruct *) arr.mutable_data(); - for (size_t i = 0; i < n; i++) { - ptr[i].cflt.real(float(i)); - ptr[i].cflt.imag(float(i) + 0.25f); - ptr[i].cdbl.real(double(i) + 0.5); - ptr[i].cdbl.imag(double(i) + 0.75); - } - return arr; - }); - m.def("print_complex_array", &print_recarray); - - // test_array_constructors - m.def("test_array_ctors", &test_array_ctors); - - // test_compare_buffer_info - struct CompareStruct { - bool x; - uint32_t y; - float z; - }; - PYBIND11_NUMPY_DTYPE(CompareStruct, x, y, z); - m.def("compare_buffer_info", []() { - py::list list; - list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(float), "f", 1)))); - list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(int), "I", 1)))); - list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(long), "l", 1)))); - list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(long), sizeof(long) == sizeof(int) ? 
"i" : "q", 1)))); - list.append(py::bool_(py::detail::compare_buffer_info::compare(py::buffer_info(nullptr, sizeof(CompareStruct), "T{?:x:3xI:y:f:z:}", 1)))); - return list; - }); - m.def("buffer_to_dtype", [](py::buffer& buf) { return py::dtype(buf.request()); }); - - // test_scalar_conversion - m.def("f_simple", [](SimpleStruct s) { return s.uint_ * 10; }); - m.def("f_packed", [](PackedStruct s) { return s.uint_ * 10; }); - m.def("f_nested", [](NestedStruct s) { return s.a.uint_ * 10; }); - - // test_register_dtype - m.def("register_dtype", []() { PYBIND11_NUMPY_DTYPE(SimpleStruct, bool_, uint_, float_, ldbl_); }); - - // test_str_leak - m.def("dtype_wrapper", [](py::object d) { return py::dtype::from_args(std::move(d)); }); -} diff --git a/spaces/ma-xu/LIVE/thrust/cmake/ThrustAddSubdir.cmake b/spaces/ma-xu/LIVE/thrust/cmake/ThrustAddSubdir.cmake deleted file mode 100644 index d48aa1415789f8fff6ad35b17404880c481d7b93..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/cmake/ThrustAddSubdir.cmake +++ /dev/null @@ -1,6 +0,0 @@ -find_package(Thrust REQUIRED CONFIG - NO_DEFAULT_PATH # Only check the explicit path in HINTS: - HINTS "${CMAKE_CURRENT_LIST_DIR}/.." - COMPONENTS ${THRUST_REQUIRED_SYSTEMS} - OPTIONAL_COMPONENTS ${THRUST_OPTIONAL_SYSTEMS} -) diff --git a/spaces/ma-xu/LIVE/thrust/thrust/mr/polymorphic_adaptor.h b/spaces/ma-xu/LIVE/thrust/thrust/mr/polymorphic_adaptor.h deleted file mode 100644 index d5d98bf8382e9544605a6689e4bc2611b55f960d..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/mr/polymorphic_adaptor.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2018-2019 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include "memory_resource.h" - -namespace thrust -{ -namespace mr -{ - -template -class polymorphic_adaptor_resource THRUST_FINAL : public memory_resource -{ -public: - polymorphic_adaptor_resource(memory_resource * t) : upstream_resource(t) - { - } - - virtual Pointer do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE - { - return upstream_resource->allocate(bytes, alignment); - } - - virtual void do_deallocate(Pointer p, std::size_t bytes, std::size_t alignment) THRUST_OVERRIDE - { - return upstream_resource->deallocate(p, bytes, alignment); - } - - __host__ __device__ - virtual bool do_is_equal(const memory_resource & other) const THRUST_NOEXCEPT THRUST_OVERRIDE - { - return upstream_resource->is_equal(other); - } - -private: - memory_resource * upstream_resource; -}; - -} // end mr -} // end thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/unique.h b/spaces/ma-xu/LIVE/thrust/thrust/unique.h deleted file mode 100644 index b4b2118d321374e2dac04592914d33b2003fad8a..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/unique.h +++ /dev/null @@ -1,968 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file unique.h - * \brief Move unique elements to the front of a range - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ - - -/*! \addtogroup stream_compaction - * \{ - */ - - -/*! For each group of consecutive elements in the range [first, last) - * with the same value, \p unique removes all but the first element of - * the group. The return value is an iterator \c new_last such that - * no two consecutive elements in the range [first, new_last) are - * equal. The iterators in the range [new_last, last) are all still - * dereferenceable, but the elements that they point to are unspecified. - * \p unique is stable, meaning that the relative order of elements that are - * not removed is unchanged. - * - * This version of \p unique uses \c operator== to test for equality. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input range. - * \param last The end of the input range. - * \return The end of the unique range [first, new_last). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * - * The following code snippet demonstrates how to use \p unique to - * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy - * for parallelization: - * - * \code - * #include - * #include - * ... 
- * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int *new_end = thrust::unique(thrust::host, A, A + N); - * // The first four values of A are now {1, 3, 2, 1} - * // Values beyond new_end are unspecified. - * \endcode - * - * \see http://www.sgi.com/tech/stl/unique.html - * \see unique_copy - */ -template -__host__ __device__ -ForwardIterator unique(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, - ForwardIterator last); - - -/*! For each group of consecutive elements in the range [first, last) - * with the same value, \p unique removes all but the first element of - * the group. The return value is an iterator \c new_last such that - * no two consecutive elements in the range [first, new_last) are - * equal. The iterators in the range [new_last, last) are all still - * dereferenceable, but the elements that they point to are unspecified. - * \p unique is stable, meaning that the relative order of elements that are - * not removed is unchanged. - * - * This version of \p unique uses \c operator== to test for equality. - * - * \param first The beginning of the input range. - * \param last The end of the input range. - * \return The end of the unique range [first, new_last). - * - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * - * The following code snippet demonstrates how to use \p unique to - * compact a sequence of numbers to remove consecutive duplicates. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int *new_end = thrust::unique(A, A + N); - * // The first four values of A are now {1, 3, 2, 1} - * // Values beyond new_end are unspecified. - * \endcode - * - * \see http://www.sgi.com/tech/stl/unique.html - * \see unique_copy - */ -template -ForwardIterator unique(ForwardIterator first, - ForwardIterator last); - - -/*! For each group of consecutive elements in the range [first, last) - * with the same value, \p unique removes all but the first element of - * the group. The return value is an iterator \c new_last such that - * no two consecutive elements in the range [first, new_last) are - * equal. The iterators in the range [new_last, last) are all still - * dereferenceable, but the elements that they point to are unspecified. - * \p unique is stable, meaning that the relative order of elements that are - * not removed is unchanged. - * - * This version of \p unique uses the function object \p binary_pred to test - * for equality. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [first, new_last) - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. - * \tparam BinaryPredicate is a model of Binary Predicate. 
- * - * The following code snippet demonstrates how to use \p unique to - * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy - * for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int *new_end = thrust::unique(thrust::host, A, A + N, thrust::equal_to()); - * // The first four values of A are now {1, 3, 2, 1} - * // Values beyond new_end are unspecified. - * \endcode - * - * \see http://www.sgi.com/tech/stl/unique.html - * \see unique_copy - */ -template -__host__ __device__ -ForwardIterator unique(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, - ForwardIterator last, - BinaryPredicate binary_pred); - - -/*! For each group of consecutive elements in the range [first, last) - * with the same value, \p unique removes all but the first element of - * the group. The return value is an iterator \c new_last such that - * no two consecutive elements in the range [first, new_last) are - * equal. The iterators in the range [new_last, last) are all still - * dereferenceable, but the elements that they point to are unspecified. - * \p unique is stable, meaning that the relative order of elements that are - * not removed is unchanged. - * - * This version of \p unique uses the function object \p binary_pred to test - * for equality. - * - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [first, new_last) - * - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * The following code snippet demonstrates how to use \p unique to - * compact a sequence of numbers to remove consecutive duplicates. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int *new_end = thrust::unique(A, A + N, thrust::equal_to()); - * // The first four values of A are now {1, 3, 2, 1} - * // Values beyond new_end are unspecified. - * \endcode - * - * \see http://www.sgi.com/tech/stl/unique.html - * \see unique_copy - */ -template -ForwardIterator unique(ForwardIterator first, - ForwardIterator last, - BinaryPredicate binary_pred); - - -/*! \p unique_copy copies elements from the range [first, last) - * to a range beginning with \p result, except that in a consecutive group - * of duplicate elements only the first one is copied. The return value - * is the end of the range to which the elements are copied. - * - * The reason there are two different versions of unique_copy is that there - * are two different definitions of what it means for a consecutive group of - * elements to be duplicates. In the first version, the test is simple - * equality: the elements in a range [f, l) are duplicates if, - * for every iterator \p i in the range, either i == f or else - * *i == *(i-1). In the second, the test is an arbitrary - * \p BinaryPredicate \p binary_pred: the elements in [f, l) are - * duplicates if, for every iterator \p i in the range, either i == f - * or else binary_pred(*i, *(i-1)) is \p true. - * - * This version of \p unique_copy uses \c operator== to test for equality. 
- * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param result The beginning of the output range. - * \return The end of the unique range [result, result_end). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is a model of Equality Comparable. - * \tparam OutputIterator is a model of Output Iterator and - * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. - * - * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_copy to - * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution - * policy for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int B[N]; - * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B); - * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 - * // Values beyond result_end are unspecified - * \endcode - * - * \see unique - * \see http://www.sgi.com/tech/stl/unique_copy.html - */ -template -__host__ __device__ -OutputIterator unique_copy(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p unique_copy copies elements from the range [first, last) - * to a range beginning with \p result, except that in a consecutive group - * of duplicate elements only the first one is copied. The return value - * is the end of the range to which the elements are copied. - * - * The reason there are two different versions of unique_copy is that there - * are two different definitions of what it means for a consecutive group of - * elements to be duplicates. In the first version, the test is simple - * equality: the elements in a range [f, l) are duplicates if, - * for every iterator \p i in the range, either i == f or else - * *i == *(i-1). In the second, the test is an arbitrary - * \p BinaryPredicate \p binary_pred: the elements in [f, l) are - * duplicates if, for every iterator \p i in the range, either i == f - * or else binary_pred(*i, *(i-1)) is \p true. - * - * This version of \p unique_copy uses \c operator== to test for equality. - * - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param result The beginning of the output range. - * \return The end of the unique range [result, result_end). - * - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is a model of Equality Comparable. - * \tparam OutputIterator is a model of Output Iterator and - * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. - * - * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_copy to - * compact a sequence of numbers to remove consecutive duplicates. - * - * \code - * #include - * ... 
- * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int B[N]; - * int *result_end = thrust::unique_copy(A, A + N, B); - * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 - * // Values beyond result_end are unspecified - * \endcode - * - * \see unique - * \see http://www.sgi.com/tech/stl/unique_copy.html - */ -template -OutputIterator unique_copy(InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p unique_copy copies elements from the range [first, last) - * to a range beginning with \p result, except that in a consecutive group - * of duplicate elements only the first one is copied. The return value - * is the end of the range to which the elements are copied. - * - * This version of \p unique_copy uses the function object \c binary_pred - * to test for equality. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param result The beginning of the output range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [result, result_end). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is a model of Equality Comparable. - * \tparam OutputIterator is a model of Output Iterator and - * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_copy to - * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution - * policy for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int B[N]; - * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B, thrust::equal_to()); - * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 - * // Values beyond result_end are unspecified. - * \endcode - * - * \see unique - * \see http://www.sgi.com/tech/stl/unique_copy.html - */ -template -__host__ __device__ -OutputIterator unique_copy(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - BinaryPredicate binary_pred); - - -/*! \p unique_copy copies elements from the range [first, last) - * to a range beginning with \p result, except that in a consecutive group - * of duplicate elements only the first one is copied. The return value - * is the end of the range to which the elements are copied. - * - * This version of \p unique_copy uses the function object \c binary_pred - * to test for equality. - * - * \param first The beginning of the input range. - * \param last The end of the input range. - * \param result The beginning of the output range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [result, result_end). - * - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is a model of Equality Comparable. 
- * \tparam OutputIterator is a model of Output Iterator and - * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_copy to - * compact a sequence of numbers to remove consecutive duplicates. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; - * int B[N]; - * int *result_end = thrust::unique_copy(A, A + N, B, thrust::equal_to()); - * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 - * // Values beyond result_end are unspecified. - * \endcode - * - * \see unique - * \see http://www.sgi.com/tech/stl/unique_copy.html - */ -template -OutputIterator unique_copy(InputIterator first, - InputIterator last, - OutputIterator result, - BinaryPredicate binary_pred); - - -/*! \p unique_by_key is a generalization of \p unique to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key removes all but the first element of - * the group. Similarly, the corresponding values in the range - * [values_first, values_first + (keys_last - keys_first)) - * are also removed. - * - * The return value is a \p pair of iterators (new_keys_last,new_values_last) - * such that no two consecutive elements in the range [keys_first, new_keys_last) - * are equal. - * - * This version of \p unique_by_key uses \c operator== to test for equality and - * \c project1st to reduce values with equal keys. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first The beginning of the key range. - * \param keys_last The end of the key range. - * \param values_first The beginning of the value range. - * \return A pair of iterators at end of the ranges [key_first, keys_new_last) and [values_first, values_new_last). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator1 is a model of Forward Iterator, - * and \p ForwardIterator1 is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * \tparam ForwardIterator2 is a model of Forward Iterator, - * and \p ForwardIterator2 is mutable. - * - * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_by_key to - * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host - * execution policy for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values - * - * thrust::pair new_end; - * new_end = thrust::unique_by_key(thrust::host, A, A + N, B); - * - * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. - * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. 
- * \endcode - * - * \see unique - * \see unique_by_key_copy - * \see reduce_by_key - */ -template -__host__ __device__ - thrust::pair - unique_by_key(const thrust::detail::execution_policy_base &exec, - ForwardIterator1 keys_first, - ForwardIterator1 keys_last, - ForwardIterator2 values_first); - - -/*! \p unique_by_key is a generalization of \p unique to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key removes all but the first element of - * the group. Similarly, the corresponding values in the range - * [values_first, values_first + (keys_last - keys_first)) - * are also removed. - * - * The return value is a \p pair of iterators (new_keys_last,new_values_last) - * such that no two consecutive elements in the range [keys_first, new_keys_last) - * are equal. - * - * This version of \p unique_by_key uses \c operator== to test for equality and - * \c project1st to reduce values with equal keys. - * - * \param keys_first The beginning of the key range. - * \param keys_last The end of the key range. - * \param values_first The beginning of the value range. - * \return A pair of iterators at end of the ranges [key_first, keys_new_last) and [values_first, values_new_last). - * - * \tparam ForwardIterator1 is a model of Forward Iterator, - * and \p ForwardIterator1 is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * \tparam ForwardIterator2 is a model of Forward Iterator, - * and \p ForwardIterator2 is mutable. - * - * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_by_key to - * compact a sequence of key/value pairs to remove consecutive duplicates. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values - * - * thrust::pair new_end; - * new_end = thrust::unique_by_key(A, A + N, B); - * - * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. - * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. - * \endcode - * - * \see unique - * \see unique_by_key_copy - * \see reduce_by_key - */ -template - thrust::pair - unique_by_key(ForwardIterator1 keys_first, - ForwardIterator1 keys_last, - ForwardIterator2 values_first); - - -/*! \p unique_by_key is a generalization of \p unique to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key removes all but the first element of - * the group. Similarly, the corresponding values in the range - * [values_first, values_first + (keys_last - keys_first)) - * are also removed. - * - * This version of \p unique_by_key uses the function object \c binary_pred - * to test for equality and \c project1st to reduce values with equal keys. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first The beginning of the key range. - * \param keys_last The end of the key range. - * \param values_first The beginning of the value range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [first, new_last). - * - * \tparam DerivedPolicy The name of the derived execution policy. 
- * \tparam ForwardIterator1 is a model of Forward Iterator, - * and \p ForwardIterator1 is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * \tparam ForwardIterator2 is a model of Forward Iterator, - * and \p ForwardIterator2 is mutable. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_by_key to - * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host - * execution policy for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values - * - * thrust::pair new_end; - * thrust::equal_to binary_pred; - * new_end = thrust::unique_by_key(thrust::host, keys, keys + N, values, binary_pred); - * - * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. - * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. - * \endcode - * - * \see unique - * \see unique_by_key_copy - * \see reduce_by_key - */ -template -__host__ __device__ - thrust::pair - unique_by_key(const thrust::detail::execution_policy_base &exec, - ForwardIterator1 keys_first, - ForwardIterator1 keys_last, - ForwardIterator2 values_first, - BinaryPredicate binary_pred); - - -/*! \p unique_by_key is a generalization of \p unique to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key removes all but the first element of - * the group. Similarly, the corresponding values in the range - * [values_first, values_first + (keys_last - keys_first)) - * are also removed. - * - * This version of \p unique_by_key uses the function object \c binary_pred - * to test for equality and \c project1st to reduce values with equal keys. - * - * \param keys_first The beginning of the key range. - * \param keys_last The end of the key range. - * \param values_first The beginning of the value range. - * \param binary_pred The binary predicate used to determine equality. - * \return The end of the unique range [first, new_last). - * - * \tparam ForwardIterator1 is a model of Forward Iterator, - * and \p ForwardIterator1 is mutable, - * and \p ForwardIterator's \c value_type is a model of Equality Comparable. - * \tparam ForwardIterator2 is a model of Forward Iterator, - * and \p ForwardIterator2 is mutable. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p unique_by_key to - * compact a sequence of key/value pairs to remove consecutive duplicates. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values - * - * thrust::pair new_end; - * thrust::equal_to binary_pred; - * new_end = thrust::unique_by_key(keys, keys + N, values, binary_pred); - * - * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. - * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. 
- * \endcode - * - * \see unique - * \see unique_by_key_copy - * \see reduce_by_key - */ -template - thrust::pair - unique_by_key(ForwardIterator1 keys_first, - ForwardIterator1 keys_last, - ForwardIterator2 values_first, - BinaryPredicate binary_pred); - - -/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key_copy copies the first element of the group to - * a range beginning with \c keys_result and the corresponding values from the range - * [values_first, values_first + (keys_last - keys_first)) are copied to a range - * beginning with \c values_result. - * - * This version of \p unique_by_key_copy uses \c operator== to test for equality and - * \c project1st to reduce values with equal keys. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first The beginning of the input key range. - * \param keys_last The end of the input key range. - * \param values_first The beginning of the input value range. - * \param keys_result The beginning of the output key range. - * \param values_result The beginning of the output value range. - * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator, - * \tparam InputIterator2 is a model of Input Iterator, - * \tparam OutputIterator1 is a model of Output Iterator and - * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. - * \tparam OutputIterator2 is a model of Output Iterator and - * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. - * - * \pre The input ranges shall not overlap either output range. - * - * The following code snippet demonstrates how to use \p unique_by_key_copy to - * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy - * for parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values - * int C[N]; // output keys - * int D[N]; // output values - * - * thrust::pair new_end; - * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D); - * - * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. - * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. - * \endcode - * - * \see unique_copy - * \see unique_by_key - * \see reduce_by_key - */ -template -__host__ __device__ - thrust::pair - unique_by_key_copy(const thrust::detail::execution_policy_base &exec, - InputIterator1 keys_first, - InputIterator1 keys_last, - InputIterator2 values_first, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key_copy copies the first element of the group to - * a range beginning with \c keys_result and the corresponding values from the range - * [values_first, values_first + (keys_last - keys_first)) are copied to a range - * beginning with \c values_result. 
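For orientation, here is a minimal, self-contained sketch that contrasts the in-place `unique_by_key` overloads documented above with the copying form introduced here. It is not taken verbatim from this header: it assumes the standard Thrust headers (`thrust/unique.h`, `thrust/execution_policy.h`, `thrust/pair.h`) and the `thrust::host` execution policy, and it reuses the key/value data from the snippets in this file.

```cpp
// Sketch only (assumes the standard Thrust API): key/value compaction with
// unique_by_key_copy (non-destructive) and unique_by_key (in place).
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <thrust/pair.h>

int main()
{
  const int N = 7;
  int keys[N]   = {1, 3, 3, 3, 2, 2, 1};  // input keys
  int values[N] = {9, 8, 7, 6, 5, 4, 3};  // input values

  // Copying form: the first key of each run of equal keys goes to out_keys,
  // its value goes to out_values, and the input ranges are left untouched.
  int out_keys[N];
  int out_values[N];
  thrust::pair<int*, int*> copy_end =
      thrust::unique_by_key_copy(thrust::host, keys, keys + N, values,
                                 out_keys, out_values);
  // out_keys begins {1, 3, 2, 1}, out_values begins {9, 8, 5, 3},
  // and copy_end.first - out_keys == 4.

  // In-place form: compacts keys/values directly and returns the new ends.
  thrust::pair<int*, int*> new_end =
      thrust::unique_by_key(thrust::host, keys, keys + N, values);
  // keys now begins {1, 3, 2, 1}, values begins {9, 8, 5, 3},
  // and new_end.first - keys == 4.

  return 0;
}
```

Both calls above rely on `operator==` for key equality; the overloads documented next take an explicit `BinaryPredicate` instead, which is the variant to use when key equality is not the built-in comparison.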
- * - * This version of \p unique_by_key_copy uses \c operator== to test for equality and - * \c project1st to reduce values with equal keys. - * - * \param keys_first The beginning of the input key range. - * \param keys_last The end of the input key range. - * \param values_first The beginning of the input value range. - * \param keys_result The beginning of the output key range. - * \param values_result The beginning of the output value range. - * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). - * - * \tparam InputIterator1 is a model of Input Iterator, - * \tparam InputIterator2 is a model of Input Iterator, - * \tparam OutputIterator1 is a model of Output Iterator and - * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. - * \tparam OutputIterator2 is a model of Output Iterator and - * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. - * - * \pre The input ranges shall not overlap either output range. - * - * The following code snippet demonstrates how to use \p unique_by_key_copy to - * compact a sequence of key/value pairs and with equal keys. - * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values - * int C[N]; // output keys - * int D[N]; // output values - * - * thrust::pair new_end; - * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D); - * - * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. - * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. - * \endcode - * - * \see unique_copy - * \see unique_by_key - * \see reduce_by_key - */ -template - thrust::pair - unique_by_key_copy(InputIterator1 keys_first, - InputIterator1 keys_last, - InputIterator2 values_first, - OutputIterator1 keys_result, - OutputIterator2 values_result); - - -/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key_copy copies the first element of the group to - * a range beginning with \c keys_result and the corresponding values from the range - * [values_first, values_first + (keys_last - keys_first)) are copied to a range - * beginning with \c values_result. - * - * This version of \p unique_by_key_copy uses the function object \c binary_pred - * to test for equality and \c project1st to reduce values with equal keys. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param keys_first The beginning of the input key range. - * \param keys_last The end of the input key range. - * \param values_first The beginning of the input value range. - * \param keys_result The beginning of the output key range. - * \param values_result The beginning of the output value range. - * \param binary_pred The binary predicate used to determine equality. - * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). - * - * \tparam DerivedPolicy The name of the derived execution policy. 
- * \tparam InputIterator1 is a model of Input Iterator, - * \tparam InputIterator2 is a model of Input Iterator, - * \tparam OutputIterator1 is a model of Output Iterator and - * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. - * \tparam OutputIterator2 is a model of Output Iterator and - * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The input ranges shall not overlap either output range. - * - * The following code snippet demonstrates how to use \p unique_by_key_copy to - * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy for - * parallelization: - * - * \code - * #include - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values - * int C[N]; // output keys - * int D[N]; // output values - * - * thrust::pair new_end; - * thrust::equal_to binary_pred; - * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D, binary_pred); - * - * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. - * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. - * \endcode - * - * \see unique_copy - * \see unique_by_key - * \see reduce_by_key - */ -template -__host__ __device__ - thrust::pair - unique_by_key_copy(const thrust::detail::execution_policy_base &exec, - InputIterator1 keys_first, - InputIterator1 keys_last, - InputIterator2 values_first, - OutputIterator1 keys_result, - OutputIterator2 values_result, - BinaryPredicate binary_pred); - - -/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. - * For each group of consecutive keys in the range [keys_first, keys_last) - * that are equal, \p unique_by_key_copy copies the first element of the group to - * a range beginning with \c keys_result and the corresponding values from the range - * [values_first, values_first + (keys_last - keys_first)) are copied to a range - * beginning with \c values_result. - * - * This version of \p unique_by_key_copy uses the function object \c binary_pred - * to test for equality and \c project1st to reduce values with equal keys. - * - * \param keys_first The beginning of the input key range. - * \param keys_last The end of the input key range. - * \param values_first The beginning of the input value range. - * \param keys_result The beginning of the output key range. - * \param values_result The beginning of the output value range. - * \param binary_pred The binary predicate used to determine equality. - * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). - * - * \tparam InputIterator1 is a model of Input Iterator, - * \tparam InputIterator2 is a model of Input Iterator, - * \tparam OutputIterator1 is a model of Output Iterator and - * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. - * \tparam OutputIterator2 is a model of Output Iterator and - * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre The input ranges shall not overlap either output range. - * - * The following code snippet demonstrates how to use \p unique_by_key_copy to - * compact a sequence of key/value pairs and with equal keys. 
- * - * \code - * #include - * ... - * const int N = 7; - * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys - * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values - * int C[N]; // output keys - * int D[N]; // output values - * - * thrust::pair new_end; - * thrust::equal_to binary_pred; - * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D, binary_pred); - * - * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. - * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. - * \endcode - * - * \see unique_copy - * \see unique_by_key - * \see reduce_by_key - */ -template - thrust::pair - unique_by_key_copy(InputIterator1 keys_first, - InputIterator1 keys_last, - InputIterator2 values_first, - OutputIterator1 keys_result, - OutputIterator2 values_result, - BinaryPredicate binary_pred); - - -/*! \} // end stream_compaction - */ - - -} // end namespace thrust - -#include - diff --git a/spaces/manu/the-rap-god-test/README.md b/spaces/manu/the-rap-god-test/README.md deleted file mode 100644 index d4080900514c53b8fe22adea265a9ab33bf11fb3..0000000000000000000000000000000000000000 --- a/spaces/manu/the-rap-god-test/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: The Rap God Test -emoji: 😻 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/__init__.py b/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/__init__.py deleted file mode 100644 index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/marioboy/neil-breen/README.md b/spaces/marioboy/neil-breen/README.md deleted file mode 100644 index 373f366cc2653d4c437693bdbe5285578a3ae627..0000000000000000000000000000000000000000 --- a/spaces/marioboy/neil-breen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Neil Breen -emoji: 👽 -colorFrom: blue -colorTo: orange -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/marker22/Bark-Voice-Cloning/swap_voice.py b/spaces/marker22/Bark-Voice-Cloning/swap_voice.py deleted file mode 100644 index be1135be3648f9757046de1f9a4e240bd818be5a..0000000000000000000000000000000000000000 --- a/spaces/marker22/Bark-Voice-Cloning/swap_voice.py +++ /dev/null @@ -1,62 +0,0 @@ -from bark.generation import load_codec_model, generate_text_semantic, grab_best_device -from bark import SAMPLE_RATE -from encodec.utils import convert_audio -from bark.hubert.hubert_manager import HuBERTManager -from bark.hubert.pre_kmeans_hubert import CustomHubert -from bark.hubert.customtokenizer import CustomTokenizer -from bark.api import semantic_to_waveform -from scipy.io.wavfile import write as write_wav -from util.helper import create_filename -from util.settings import Settings - - -import torchaudio -import torch -import os -import gradio - -def swap_voice_from_audio(swap_audio_filename, selected_speaker, tokenizer_lang, seed, batchcount, progress=gradio.Progress(track_tqdm=True)): - use_gpu = not os.environ.get("BARK_FORCE_CPU", False) - progress(0, desc="Loading Codec") - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - hubert_manager = HuBERTManager() - hubert_manager.make_sure_hubert_installed() - hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang) - - # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer - # Load HuBERT for semantic tokens - - # Load the HuBERT model - device = grab_best_device(use_gpu) - hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device) - model = load_codec_model(use_gpu=use_gpu) - - # Load the CustomTokenizer model - tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device) # Automatically uses the right layers - - progress(0.25, desc="Converting WAV") - - # Load and pre-process the audio waveform - wav, sr = torchaudio.load(swap_audio_filename) - if wav.shape[0] == 2: # Stereo to mono if needed - wav = wav.mean(0, keepdim=True) - - wav = convert_audio(wav, sr, model.sample_rate, model.channels) - wav = wav.to(device) - semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate) - semantic_tokens = tokenizer.get_token(semantic_vectors) - - audio = semantic_to_waveform( - semantic_tokens, - history_prompt=selected_speaker, - temp=0.7, - silent=False, - output_full=False) - - settings = Settings('config.yaml') - - result = create_filename(settings.output_folder_path, None, "swapvoice",".wav") - write_wav(result, SAMPLE_RATE, audio) - return result - diff --git a/spaces/matthoffner/AudioCraft_Plus/docs/MBD.md b/spaces/matthoffner/AudioCraft_Plus/docs/MBD.md deleted file mode 100644 index 296d08407bac9155380a48bdc9faa5798db32bcb..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/docs/MBD.md +++ /dev/null @@ 
-1,117 +0,0 @@ -# MultiBand Diffusion - -AudioCraft provides the code and models for MultiBand Diffusion, [From Discrete Tokens to High Fidelity Audio using MultiBand Diffusion][arxiv]. -MultiBand diffusion is a collection of 4 models that can decode tokens from -EnCodec tokenizer into waveform audio. - - - Open In Colab - -
      - - -## Installation - -Please follow the AudioCraft installation instructions from the [README](../README.md). - - -## Usage - -We offer a number of way to use MultiBand Diffusion: -1. The MusicGen demo includes a toggle to try diffusion decoder. You can use the demo locally by running [`python -m demos.musicgen_app --share`](../demos/musicgen_app.py), or through the [MusicGen Colab](https://colab.research.google.com/drive/1JlTOjB-G0A2Hz3h8PK63vLZk4xdCI5QB?usp=sharing). -2. You can play with MusicGen by running the jupyter notebook at [`demos/musicgen_demo.ipynb`](../demos/musicgen_demo.ipynb) locally (if you have a GPU). - -## API - -We provide a simple API and pre-trained models for MusicGen and for EnCodec at 24 khz for 3 bitrates (1.5 kbps, 3 kbps and 6 kbps). - -See after a quick example for using MultiBandDiffusion with the MusicGen API: - -```python -import torchaudio -from audiocraft.models import MusicGen, MultiBandDiffusion -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('facebook/musicgen-melody') -mbd = MultiBandDiffusion.get_mbd_musicgen() -model.set_generation_params(duration=8) # generate 8 seconds. -wav, tokens = model.generate_unconditional(4, return_tokens=True) # generates 4 unconditional audio samples and keep the tokens for MBD generation -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav_diffusion = mbd.tokens_to_wav(tokens) -wav, tokens = model.generate(descriptions, return_tokens=True) # generates 3 samples and keep the tokens. -wav_diffusion = mbd.tokens_to_wav(tokens) -melody, sr = torchaudio.load('./assets/bach.mp3') -# Generates using the melody from the given audio and the provided descriptions, returns audio and audio tokens. -wav, tokens = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr, return_tokens=True) -wav_diffusion = mbd.tokens_to_wav(tokens) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav and {idx}_diffusion.wav, with loudness normalization at -14 db LUFS for comparing the methods. - audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) - audio_write(f'{idx}_diffusion', wav_diffusion[idx].cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) -``` - -For the compression task (and to compare with [EnCodec](https://github.com/facebookresearch/encodec)): - -```python -import torch -from audiocraft.models import MultiBandDiffusion -from encodec import EncodecModel -from audiocraft.data.audio import audio_read, audio_write - -bandwidth = 3.0 # 1.5, 3.0, 6.0 -mbd = MultiBandDiffusion.get_mbd_24khz(bw=bandwidth) -encodec = EncodecModel.get_encodec_24khz() - -somepath = '' -wav, sr = audio_read(somepath) -with torch.no_grad(): - compressed_encodec = encodec(wav) - compressed_diffusion = mbd.regenerate(wav, sample_rate=sr) - -audio_write('sample_encodec', compressed_encodec.squeeze(0).cpu(), mbd.sample_rate, strategy="loudness", loudness_compressor=True) -audio_write('sample_diffusion', compressed_diffusion.squeeze(0).cpu(), mbd.sample_rate, strategy="loudness", loudness_compressor=True) -``` - - -## Training - -The [DiffusionSolver](../audiocraft/solvers/diffusion.py) implements our diffusion training pipeline. -It generates waveform audio conditioned on the embeddings extracted from a pre-trained EnCodec model -(see [EnCodec documentation](./ENCODEC.md) for more details on how to train such model). 
- -Note that **we do NOT provide any of the datasets** used for training our diffusion models. -We provide a dummy dataset containing just a few examples for illustrative purposes. - -### Example configurations and grids - -One can train diffusion models as described in the paper by using this [dora grid](../audiocraft/grids/diffusion/4_bands_base_32khz.py). -```shell -# 4 bands MBD trainning -dora grid diffusion.4_bands_base_32khz -``` - -### Learn more - -Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md). - - -## Citation - -``` -@article{sanroman2023fromdi, - title={From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion}, - author={San Roman, Robin and Adi, Yossi and Deleforge, Antoine and Serizel, Romain and Synnaeve, Gabriel and Défossez, Alexandre}, - journal={arXiv preprint arXiv:}, - year={2023} -} -``` - - -## License - -See license information in the [README](../README.md). - - -[arxiv]: https://dl.fbaipublicfiles.com/encodec/Diffusion/paper.pdf -[mbd_samples]: https://ai.honu.io/papers/mbd/ diff --git a/spaces/matthoffner/chatbot/components/Chat/Temperature.tsx b/spaces/matthoffner/chatbot/components/Chat/Temperature.tsx deleted file mode 100644 index 78968a21b69748a455ae66ce91681cb96eb3db3c..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot/components/Chat/Temperature.tsx +++ /dev/null @@ -1,67 +0,0 @@ -import { FC, useContext, useState } from 'react'; - -import { useTranslation } from 'next-i18next'; - -import { DEFAULT_TEMPERATURE } from '@/utils/app/const'; - -import HomeContext from '@/pages/api/home/home.context'; - -interface Props { - label: string; - onChangeTemperature: (temperature: number) => void; -} - -export const TemperatureSlider: FC = ({ - label, - onChangeTemperature, -}) => { - const { - state: { conversations }, - } = useContext(HomeContext); - const lastConversation = conversations[conversations.length - 1]; - const [temperature, setTemperature] = useState( - lastConversation?.temperature ?? DEFAULT_TEMPERATURE, - ); - const { t } = useTranslation('chat'); - const handleChange = (event: React.ChangeEvent) => { - const newValue = parseFloat(event.target.value); - setTemperature(newValue); - onChangeTemperature(newValue); - }; - - return ( -
      - - - {t( - 'Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.', - )} - - - {temperature.toFixed(1)} - - -
        -
      • - {t('Precise')} -
      • -
      • - {t('Neutral')} -
      • -
      • - {t('Creative')} -
      • -
      -
      - ); -}; diff --git a/spaces/matthoffner/chatbot/utils/app/prompts.ts b/spaces/matthoffner/chatbot/utils/app/prompts.ts deleted file mode 100644 index 64a8b2fd71ad78983d1bdd7d988b95b1a34ea7f9..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot/utils/app/prompts.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { Prompt } from '@/types/prompt'; - -export const updatePrompt = (updatedPrompt: Prompt, allPrompts: Prompt[]) => { - const updatedPrompts = allPrompts.map((c) => { - if (c.id === updatedPrompt.id) { - return updatedPrompt; - } - - return c; - }); - - savePrompts(updatedPrompts); - - return { - single: updatedPrompt, - all: updatedPrompts, - }; -}; - -export const savePrompts = (prompts: Prompt[]) => { - localStorage.setItem('prompts', JSON.stringify(prompts)); -}; diff --git a/spaces/meraGPT/meraKB/loaders/pdf.py b/spaces/meraGPT/meraKB/loaders/pdf.py deleted file mode 100644 index e76a05d277e55851f6e1586a2f46a3ad3c2e394f..0000000000000000000000000000000000000000 --- a/spaces/meraGPT/meraKB/loaders/pdf.py +++ /dev/null @@ -1,6 +0,0 @@ -from .common import process_file -from langchain.document_loaders import PyPDFLoader - - -def process_pdf(vector_store, file, stats_db): - return process_file(vector_store, file, PyPDFLoader, ".pdf", stats_db=stats_db) diff --git a/spaces/merve/anonymization/source/third_party/regl.min.js b/spaces/merve/anonymization/source/third_party/regl.min.js deleted file mode 100644 index 7ecf11321eda67a76e019d6881f42b52f3d39c78..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/third_party/regl.min.js +++ /dev/null @@ -1,171 +0,0 @@ -(function(Z,ka){"object"===typeof exports&&"undefined"!==typeof module?module.exports=ka():"function"===typeof define&&define.amd?define(ka):Z.createREGL=ka()})(this,function(){function Z(a,b){this.id=Db++;this.type=a;this.data=b}function ka(a){if(0===a.length)return[];var b=a.charAt(0),c=a.charAt(a.length-1);if(1>>=b;c=(255>>=c;b|=c;c=(15>>=c;b|=c;c=(3>>c>>1}function hb(){function a(a){a:{for(var b=16;268435456>=b;b*=16)if(a<=b){a=b;break a}a=0}b=c[gb(a)>>2];return 0>2].push(a)}var c=R(8,function(){return[]});return{alloc:a,free:b,allocType:function(b,c){var d=null;switch(b){case 5120:d=new Int8Array(a(c),0,c);break;case 5121:d=new Uint8Array(a(c),0,c);break;case 5122:d=new Int16Array(a(2*c),0,c);break;case 5123:d=new Uint16Array(a(2*c),0,c);break;case 5124:d=new Int32Array(a(4*c),0,c);break;case 5125:d=new Uint32Array(a(4*c),0,c);break;case 5126:d=new Float32Array(a(4*c),0,c);break;default:return null}return d.length!== -c?d.subarray(0,c):d},freeType:function(a){b(a.buffer)}}}function la(a){return!!a&&"object"===typeof a&&Array.isArray(a.shape)&&Array.isArray(a.stride)&&"number"===typeof a.offset&&a.shape.length===a.stride.length&&(Array.isArray(a.data)||O(a.data))}function ib(a,b,c,e,f,d){for(var q=0;qe&&(e=d.buffer.byteLength,5123===k?e>>=1:5125===k&&(e>>=2));d.vertCount=e;e=g;0>g&&(e=4,g=d.buffer.dimension,1===g&&(e=0),2===g&&(e=1),3===g&&(e=4));d.primType=e}function q(a){e.elementsCount--;delete n[a.id];a.buffer.destroy();a.buffer=null}var n={},v=0,k={uint8:5121,uint16:5123};b.oes_element_index_uint&&(k.uint32=5125);f.prototype.bind=function(){this.buffer.bind()};var u=[];return{create:function(a, -b){function l(a){if(a)if("number"===typeof a)g(a),h.primType=4,h.vertCount=a|0,h.type=5121;else{var b=null,c=35044,e=-1,f=-1,m=0,n=0;if(Array.isArray(a)||O(a)||la(a))b=a;else if("data"in a&&(b=a.data),"usage"in a&&(c=nb[a.usage]),"primitive"in a&&(e=Ka[a.primitive]),"count"in 
a&&(f=a.count|0),"type"in a&&(n=k[a.type]),"length"in a)m=a.length|0;else if(m=f,5123===n||5122===n)m*=2;else if(5125===n||5124===n)m*=4;d(h,b,c,e,f,m,n)}else g(),h.primType=4,h.vertCount=0,h.type=5121;return l}var g=c.create(null, -34963,!0),h=new f(g._buffer);e.elementsCount++;l(a);l._reglType="elements";l._elements=h;l.subdata=function(a,b){g.subdata(a,b);return l};l.destroy=function(){q(h)};return l},createStream:function(a){var b=u.pop();b||(b=new f(c.create(null,34963,!0,!1)._buffer));d(b,a,35040,-1,-1,0,0);return b},destroyStream:function(a){u.push(a)},getElements:function(a){return"function"===typeof a&&a._elements instanceof f?a._elements:null},clear:function(){I(n).forEach(q)}}}function ob(a){for(var b=G.allocType(5123, -a.length),c=0;c>>31<<15,d=(e<<1>>>24)-127,e=e>>13&1023;b[c]=-24>d?f:-14>d?f+(e+1024>>-14-d):15>=e,c.height>>=e,x(c,d[e]),a.mipmask|=1<b;++b)a.images[b]=null;return a}function ya(a){for(var b=a.images,c=0;cb){for(var c=0;c=--this.refCount&&F(this)}});q.profile&&(d.getTotalTextureSize=function(){var a=0;Object.keys(ea).forEach(function(b){a+=ea[b].stats.size});return a});return{create2D:function(b,c){function e(a,b){var c=f.texInfo;w.call(c);var d=ma();"number"===typeof a?"number"===typeof b?p(d,a|0,b|0):p(d,a|0,a|0):a?(H(c,a),P(d,a)):p(d,1,1);c.genMipmaps&&(d.mipmask=(d.width<<1)-1);f.mipmask=d.mipmask;v(f, -d);f.internalformat=d.internalformat;e.width=d.width;e.height=d.height;T(f);t(d,3553);M(c,3553);wa();ya(d);q.profile&&(f.stats.size=La(f.internalformat,f.type,d.width,d.height,c.genMipmaps,!1));e.format=ca[f.internalformat];e.type=K[f.type];e.mag=Fa[c.magFilter];e.min=pa[c.minFilter];e.wrapS=qa[c.wrapS];e.wrapT=qa[c.wrapT];return e}var f=new y(3553);ea[f.id]=f;d.textureCount++;e(b,c);e.subimage=function(a,b,c,d){b|=0;c|=0;d|=0;var y=g();v(y,f);y.width=0;y.height=0;x(y,a);y.width=y.width||(f.width>> -d)-b;y.height=y.height||(f.height>>d)-c;T(f);l(y,3553,b,c,d);wa();h(y);return e};e.resize=function(b,c){var d=b|0,g=c|0||d;if(d===f.width&&g===f.height)return e;e.width=f.width=d;e.height=f.height=g;T(f);for(var y=0;f.mipmask>>y;++y){var h=d>>y,z=g>>y;if(!h||!z)break;a.texImage2D(3553,y,f.format,h,z,0,f.format,f.type,null)}wa();q.profile&&(f.stats.size=La(f.internalformat,f.type,d,g,!1,!1));return e};e._reglType="texture2d";e._texture=f;q.profile&&(e.stats=f.stats);e.destroy=function(){f.decRef()}; -return e},createCube:function(b,c,e,f,n,r){function m(a,b,c,d,e,f){var g,da=A.texInfo;w.call(da);for(g=0;6>g;++g)F[g]=ma();if("number"===typeof a||!a)for(a=a|0||1,g=0;6>g;++g)p(F[g],a,a);else if("object"===typeof a)if(b)P(F[0],a),P(F[1],b),P(F[2],c),P(F[3],d),P(F[4],e),P(F[5],f);else if(H(da,a),k(A,a),"faces"in a)for(a=a.faces,g=0;6>g;++g)v(F[g],A),P(F[g],a[g]);else for(g=0;6>g;++g)P(F[g],a);v(A,F[0]);A.mipmask=da.genMipmaps?(F[0].width<<1)-1:F[0].mipmask;A.internalformat=F[0].internalformat;m.width= -F[0].width;m.height=F[0].height;T(A);for(g=0;6>g;++g)t(F[g],34069+g);M(da,34067);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,da.genMipmaps,!0));m.format=ca[A.internalformat];m.type=K[A.type];m.mag=Fa[da.magFilter];m.min=pa[da.minFilter];m.wrapS=qa[da.wrapS];m.wrapT=qa[da.wrapT];for(g=0;6>g;++g)ya(F[g]);return m}var A=new y(34067);ea[A.id]=A;d.cubeCount++;var F=Array(6);m(b,c,e,f,n,r);m.subimage=function(a,b,c,d,e){c|=0;d|=0;e|=0;var f=g();v(f,A);f.width=0;f.height=0; -x(f,b);f.width=f.width||(A.width>>e)-c;f.height=f.height||(A.height>>e)-d;T(A);l(f,34069+a,c,d,e);wa();h(f);return 
m};m.resize=function(b){b|=0;if(b!==A.width){m.width=A.width=b;m.height=A.height=b;T(A);for(var c=0;6>c;++c)for(var d=0;A.mipmask>>d;++d)a.texImage2D(34069+c,d,A.format,b>>d,b>>d,0,A.format,A.type,null);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,!1,!0));return m}};m._reglType="textureCube";m._texture=A;q.profile&&(m.stats=A.stats);m.destroy=function(){A.decRef()}; -return m},clear:function(){for(var b=0;bc;++c)if(0!==(b.mipmask&1<>c,b.height>>c,0,b.internalformat, -b.type,null);else for(var d=0;6>d;++d)a.texImage2D(34069+d,c,b.internalformat,b.width>>c,b.height>>c,0,b.internalformat,b.type,null);M(b.texInfo,b.target)})},refresh:function(){for(var b=0;bd;++d){for(p= -0;pa;++a)c[a].resize(d);b.width=b.height=d;return b},_reglType:"framebufferCube",destroy:function(){c.forEach(function(a){a.destroy()})}})},clear:function(){I(M).forEach(r)}, -restore:function(){t.cur=null;t.next=null;t.dirty=!0;I(M).forEach(function(b){b.framebuffer=a.createFramebuffer();p(b)})}})}function $a(){this.w=this.z=this.y=this.x=this.state=0;this.buffer=null;this.size=0;this.normalized=!1;this.type=5126;this.divisor=this.stride=this.offset=0}function Sb(a,b,c,e,f,d,q){function n(a){if(a!==r.currentVAO){var c=b.oes_vertex_array_object;a?c.bindVertexArrayOES(a.vao):c.bindVertexArrayOES(null);r.currentVAO=a}}function v(c){if(c!==r.currentVAO){if(c)c.bindAttrs(); -else{for(var d=b.angle_instanced_arrays,e=0;e=m.byteLength?l.subdata(m): -(l.destroy(),c.buffers[h]=null));c.buffers[h]||(l=c.buffers[h]=f.create(p,34962,!1,!0));k.buffer=f.getBuffer(l);k.size=k.buffer.dimension|0;k.normalized=!1;k.type=k.buffer.dtype;k.offset=0;k.stride=0;k.divisor=0;k.state=1;a[h]=1}else f.getBuffer(p)?(k.buffer=f.getBuffer(p),k.size=k.buffer.dimension|0,k.normalized=!1,k.type=k.buffer.dtype,k.offset=0,k.stride=0,k.divisor=0,k.state=1):f.getBuffer(p.buffer)?(k.buffer=f.getBuffer(p.buffer),k.size=(+p.size||k.buffer.dimension)|0,k.normalized=!!p.normalized|| -!1,k.type="type"in p?Ja[p.type]:k.buffer.dtype,k.offset=(p.offset||0)|0,k.stride=(p.stride||0)|0,k.divisor=(p.divisor||0)|0,k.state=1):"x"in p&&(k.x=+p.x||0,k.y=+p.y||0,k.z=+p.z||0,k.w=+p.w||0,k.state=2)}for(l=0;la&&(a=b.stats.uniformsCount)});return a},c.getMaxAttributesCount=function(){var a=0;x.forEach(function(b){b.stats.attributesCount>a&&(a=b.stats.attributesCount)});return a});return{clear:function(){var b=a.deleteShader.bind(a);I(k).forEach(b);k={};I(u).forEach(b); -u={};x.forEach(function(b){a.deleteProgram(b.program)});x.length=0;m={};c.shaderCount=0},program:function(b,d,e,f){var l=m[d];l||(l=m[d]={});var q=l[b];if(q&&(q.refCount++,!f))return q;var w=new n(d,b);c.shaderCount++;v(w,e,f);q||(l[b]=w);x.push(w);return L(w,{destroy:function(){w.refCount--;if(0>=w.refCount){a.deleteProgram(w.program);var b=x.indexOf(w);x.splice(b,1);c.shaderCount--}0>=l[w.vertId].refCount&&(a.deleteShader(u[w.vertId]),delete u[w.vertId],delete m[w.fragId][w.vertId]);Object.keys(m[w.fragId]).length|| -(a.deleteShader(k[w.fragId]),delete k[w.fragId],delete m[w.fragId])}})},restore:function(){k={};u={};for(var a=0;a"+b+"?"+e+".constant["+b+"]:0;"}).join(""),"}}else{","if(",g,"(",e,".buffer)){",k,"=",f,".createStream(",34962,",",e,".buffer);","}else{",k,"=",f,".getBuffer(",e,".buffer);","}",m,'="type" in ',e,"?",z.glTypes,"[",e,".type]:",k,".dtype;",B.normalized,"=!!", -e,".normalized;");d("size");d("offset");d("stride");d("divisor");c("}}");c.exit("if(",B.isStream,"){",f,".destroyStream(",k,");","}");return B})});return g}function F(a){var 
b=a["static"],c=a.dynamic,d={};Object.keys(b).forEach(function(a){var c=b[a];d[a]=w(function(a,b){return"number"===typeof c||"boolean"===typeof c?""+c:a.link(c)})});Object.keys(c).forEach(function(a){var b=c[a];d[a]=K(b,function(a,c){return a.invoke(c,b)})});return d}function A(a,b,d,e,f){function g(a){var b=p[a];b&&(ja[a]=b)} -var m=O(a,b),l=G(a,f),p=C(a,l,f),X=M(a,f),ja=y(a,f),q=H(a,f,m);g("viewport");g(h("scissor.box"));var n=0>1)",u],");")}function b(){c(t,".drawArraysInstancedANGLE(",[n,q,r,u],");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}function g(){function a(){c(l+".drawElements("+[n,r,x,q+"<<(("+x+"-5121)>>1)"]+");")}function b(){c(l+".drawArrays("+[n,q,r]+");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}var h=a.shared,l=h.gl,k=h.draw,m=d.draw, -p=function(){var e=m.elements,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f);m.elementsActive&&f("if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);")}else e=f.def(),f(e,"=",k,".","elements",";","if(",e,"){",l,".bindBuffer(",34963,",",e,".buffer.buffer);}","else if(",h.vao,".currentVAO){",e,"=",a.shared.elements+".getElements("+h.vao,".currentVAO.elements);",na?"":"if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);","}");return e}(),n=e("primitive"),q=e("offset"), -r=function(){var e=m.count,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f)}else e=f.def(k,".","count");return e}();if("number"===typeof r){if(0===r)return}else c("if(",r,"){"),c.exit("}");var u,t;W&&(u=e("instances"),t=a.instancing);var x=p+".type",v=m.elements&&xa(m.elements)&&!m.vaoActive;W&&("number"!==typeof u||0<=u)?"string"===typeof u?(c("if(",u,">0){"),f(),c("}else if(",u,"<0){"),g(),c("}")):f():g()}function ca(a,b,c,d,e){b=P();e=b.proc("body",e);W&&(b.instancing= -e.def(b.shared.extensions,".angle_instanced_arrays"));a(b,e,c,d);return b.compile().body}function Z(a,b,c,d){N(a,b);c.useVAO?c.drawVAO?b(a.shared.vao,".setVAO(",c.drawVAO.append(a,b),");"):b(a.shared.vao,".setVAO(",a.shared.vao,".targetVAO);"):(b(a.shared.vao,".setVAO(null);"),ga(a,b,c,d.attributes,function(){return!0}));Q(a,b,c,d.uniforms,function(){return!0},!1);U(a,b,b,c)}function Fa(a,b){var c=a.proc("draw",1);N(a,c);ia(a,c,b.context);S(a,c,b.framebuffer);Aa(a,c,b);I(a,c,b.state);E(a,c,b,!1,!0); -var d=b.shader.progVar.append(a,c);c(a.shared.gl,".useProgram(",d,".program);");if(b.shader.program)Z(a,c,b,b.shader.program);else{c(a.shared.vao,".setVAO(null);");var e=a.global.def("{}"),f=c.def(d,".id"),g=c.def(e,"[",f,"]");c(a.cond(g).then(g,".call(this,a0);")["else"](g,"=",e,"[",f,"]=",a.link(function(c){return ca(Z,a,b,c,1)}),"(",d,");",g,".call(this,a0);"))}0=--this.refCount&&q(this)};f.profile&&(e.getTotalRenderbufferSize=function(){var a=0;Object.keys(u).forEach(function(b){a+=u[b].stats.size});return a});return{create:function(b, -c){function l(b,c){var d=0,e=0,k=32854;"object"===typeof b&&b?("shape"in b?(e=b.shape,d=e[0]|0,e=e[1]|0):("radius"in b&&(d=e=b.radius|0),"width"in b&&(d=b.width|0),"height"in b&&(e=b.height|0)),"format"in b&&(k=n[b.format])):"number"===typeof b?(d=b|0,e="number"===typeof c?c|0:d):b||(d=e=1);if(d!==g.width||e!==g.height||k!==g.format)return l.width=g.width=d,l.height=g.height=e,g.format=k,a.bindRenderbuffer(36161,g.renderbuffer),a.renderbufferStorage(36161,k,d,e),f.profile&&(g.stats.size=Q[g.format]* -g.width*g.height),l.format=v[g.format],l}var g=new d(a.createRenderbuffer());u[g.id]=g;e.renderbufferCount++;l(b,c);l.resize=function(b,c){var 
d=b|0,e=c|0||d;if(d===g.width&&e===g.height)return l;l.width=g.width=d;l.height=g.height=e;a.bindRenderbuffer(36161,g.renderbuffer);a.renderbufferStorage(36161,g.format,d,e);f.profile&&(g.stats.size=Q[g.format]*g.width*g.height);return l};l._reglType="renderbuffer";l._renderbuffer=g;f.profile&&(l.stats=g.stats);l.destroy=function(){g.decRef()};return l},clear:function(){I(u).forEach(q)}, -restore:function(){I(u).forEach(function(b){b.renderbuffer=a.createRenderbuffer();a.bindRenderbuffer(36161,b.renderbuffer);a.renderbufferStorage(36161,b.format,b.width,b.height)});a.bindRenderbuffer(36161,null)}}},Za=[];Za[6408]=4;Za[6407]=3;var Ra=[];Ra[5121]=1;Ra[5126]=4;Ra[36193]=2;var Da=["x","y","z","w"],Xb="blend.func blend.equation stencil.func stencil.opFront stencil.opBack sample.coverage viewport scissor.box polygonOffset.offset".split(" "),Ga={0:0,1:1,zero:0,one:1,"src color":768,"one minus src color":769, -"src alpha":770,"one minus src alpha":771,"dst color":774,"one minus dst color":775,"dst alpha":772,"one minus dst alpha":773,"constant color":32769,"one minus constant color":32770,"constant alpha":32771,"one minus constant alpha":32772,"src alpha saturate":776},ab={never:512,less:513,"<":513,equal:514,"=":514,"==":514,"===":514,lequal:515,"<=":515,greater:516,">":516,notequal:517,"!=":517,"!==":517,gequal:518,">=":518,always:519},Ta={0:0,zero:0,keep:7680,replace:7681,increment:7682,decrement:7683, -"increment wrap":34055,"decrement wrap":34056,invert:5386},zb={cw:2304,ccw:2305},Ab=new J(!1,!1,!1,function(){}),$b=function(a,b){function c(){this.endQueryIndex=this.startQueryIndex=-1;this.sum=0;this.stats=null}function e(a,b,d){var e=q.pop()||new c;e.startQueryIndex=a;e.endQueryIndex=b;e.sum=0;e.stats=d;n.push(e)}if(!b.ext_disjoint_timer_query)return null;var f=[],d=[],q=[],n=[],v=[],k=[];return{beginQuery:function(a){var c=f.pop()||b.ext_disjoint_timer_query.createQueryEXT();b.ext_disjoint_timer_query.beginQueryEXT(35007, -c);d.push(c);e(d.length-1,d.length,a)},endQuery:function(){b.ext_disjoint_timer_query.endQueryEXT(35007)},pushScopeStats:e,update:function(){var a,c;a=d.length;if(0!==a){k.length=Math.max(k.length,a+1);v.length=Math.max(v.length,a+1);v[0]=0;var e=k[0]=0;for(c=a=0;c=E.length&&e()}var c=Bb(E,a);E[c]=b}}}function k(){var a=Q.viewport,b=Q.scissor_box;a[0]=a[1]=b[0]=b[1]=0;H.viewportWidth=H.framebufferWidth=H.drawingBufferWidth=a[2]=b[2]=l.drawingBufferWidth;H.viewportHeight=H.framebufferHeight=H.drawingBufferHeight=a[3]=b[3]=l.drawingBufferHeight}function u(){H.tick+=1;H.time=x();k();I.procs.poll()}function m(){A.refresh();k();I.procs.refresh();t&&t.update()}function x(){return(Cb()- -G)/1E3}a=Hb(a);if(!a)return null;var l=a.gl,g=l.getContextAttributes();l.isContextLost();var h=Ib(l,a);if(!h)return null;var r=Eb(),p={vaoCount:0,bufferCount:0,elementsCount:0,framebufferCount:0,shaderCount:0,textureCount:0,cubeCount:0,renderbufferCount:0,maxTextureUnits:0},w=h.extensions,t=$b(l,w),G=Cb(),C=l.drawingBufferWidth,J=l.drawingBufferHeight,H={tick:0,time:0,viewportWidth:C,viewportHeight:J,framebufferWidth:C,framebufferHeight:J,drawingBufferWidth:C,drawingBufferHeight:J,pixelRatio:a.pixelRatio}, -C={elements:null,primitive:4,count:-1,offset:0,instances:-1},M=Yb(l,w),y=Jb(l,p,a,function(a){return 
K.destroyBuffer(a)}),T=Kb(l,w,y,p),K=Sb(l,w,M,p,y,T,C),F=Tb(l,r,p,a),A=Nb(l,w,M,function(){I.procs.poll()},H,p,a),O=Zb(l,w,M,p,a),S=Rb(l,w,M,A,O,p),I=Wb(l,r,w,M,y,T,A,S,{},K,F,C,H,t,a),r=Ub(l,S,I.procs.poll,H,g,w,M),Q=I.next,N=l.canvas,E=[],R=[],U=[],Z=[a.onDestroy],ca=null;N&&(N.addEventListener("webglcontextlost",f,!1),N.addEventListener("webglcontextrestored",d,!1));var aa=S.setFBO=q({framebuffer:Y.define.call(null, -1,"framebuffer")});m();g=L(q,{clear:function(a){if("framebuffer"in a)if(a.framebuffer&&"framebufferCube"===a.framebuffer_reglType)for(var b=0;6>b;++b)aa(L({framebuffer:a.framebuffer.faces[b]},a),n);else aa(a,n);else n(null,a)},prop:Y.define.bind(null,1),context:Y.define.bind(null,2),"this":Y.define.bind(null,3),draw:q({}),buffer:function(a){return y.create(a,34962,!1,!1)},elements:function(a){return T.create(a,!1)},texture:A.create2D,cube:A.createCube,renderbuffer:O.create,framebuffer:S.create,framebufferCube:S.createCube, -vao:K.createVAO,attributes:g,frame:v,on:function(a,b){var c;switch(a){case "frame":return v(b);case "lost":c=R;break;case "restore":c=U;break;case "destroy":c=Z}c.push(b);return{cancel:function(){for(var a=0;a [s*a, s*b] - - var miniSel = d3.selectAll('.mini').html('').each(addMini).st({overflow: 'visible'}) - - var cColors = { - true: {true: colors.sick, false: lcolors.sick}, - false: {true: colors.well, false: lcolors.well} - } - var rColors = { - true: {true: lcolors.sick, false: llcolors.sick}, - false: {true: lcolors.well, false: llcolors.well} - } - - - function addMini(){ - var miniSel = d3.select(this) - - var type = miniSel.attr('type') - var sex = miniSel.attr('sex') - var isAll = sex == 'all' - - miniSel.st({marginBottom: sex == 'male' ? 30 : 0}) - - var data = students - .filter(d => isAll ? true : sex == 'male' ? d.isMale : !d.isMale) - - var topDatum = {} - var botDatum = {} - - if (type == 'fp'){ - topDatum.opacity = d => d.grade > d.threshold && d.isSick - botDatum.opacity = d => d.isSick - } else { - topDatum.opacity = d => d.grade > d.threshold && d.isSick - botDatum.opacity = d => d.grade > d.threshold - } - - - - var top = -s*nCols/2 + 10 - if (!isAll) top /= 2 - addGrid(miniSel.append('span'), topDatum) - miniSel.append('span.equation').text('÷').st({top, fontWeight: '', fontSize: 20}) - addGrid(miniSel.append('span'), botDatum) - miniSel.append('span.equation').text('=').st({top, fontWeight: '', fontSize: 20}) - - if (!isAll){ - var sexStr = sex == 'male' ? 'children' : 'adults' - - var coStr = `of ${sexStr}
      testing positive
      are sick` - var fpStr = `of ${sexStr}
      who are sick
      test positive` - miniSel.st({position: 'relative'}) - .append('div.axis') - .st({position: 'absolute', right: -9, textAlign: 'center', width: 95, lineHeight: 14, bottom: -15}) - .html(type == 'fp' ? fpStr : coStr) - - } - - var percentSel = miniSel.append('span.equation').st({top, marginLeft: 0}) - - function update(){ - topDatum.update() - botDatum.update() - - var percent = d3.sum(data, topDatum.opacity)/d3.sum(data, botDatum.opacity) - percentSel.text(d3.format('.0%')(percent)) - } - - miniSel.datum({update}) - - - function addGrid(gridSel, datum){ - var {opacity} = datum - - var width = s*nCols - var height = s*nCols*(isAll ? 1 : .5) - var svg = gridSel.append('svg').at({width, height}) - - var callSickSel = svg.append('rect') - .at({width, height, fill: lcolors.sick}) - - var callWellPath = svg.append('path') - .at({width, height, fill: lcolors.well}) - - - var personSel = svg.appendMany('g', data) - .translate(d => sScale(d.pos[isAll ? 'allIJ' : 'sexGroupIJ'])) - - var pad = 0 - // var rectSel = personSel.append('rect') - // .at({ - // height: s - pad, - // width: s - pad, - // // stroke: '#666', - // // strokeWidth: .1, - // }) - - - var circleSel = personSel.append('circle') - .at({r: s/4, cx: s/2 - pad/2, cy: s/2 - pad/2, fill: d => d.isSick ? colors.sick : '#777'}) - - if (!isAll){ - svg.append('path') - .translate([-1, -5]) - .at({stroke: colors.sick, d: 'M 0 0 H ' + (sex == 'male' ? 8 : 4)*s}) - } - - var geodata = {type: 'FeatureCollection'} - geodata.features = data.map(d => { - var [x, y] = sScale(d.pos[isAll ? 'allIJ' : 'sexGroupIJ']) - return { - type: 'Feature', - geometry: { - type: 'Polygon', - coordinates: [ - [[x, y], [x, y + s], [x + s, y + s], [x + s, y], [x, y]] - ] - }, - properties: {d}, - } - }) - - var topology = topojson.topology({boxes: geodata}) - var geowrap = topojson.feature(topology, topology.objects.boxes) - var path = d3.geoPath() - - var hiddenPath = svg.append('path') - .at({stroke: 'none', fill: 'rgba(255,255,255,.6)'}) - .translate(.5, 1) - - var includedPath = svg.append('path') - .at({stroke: '#000', fill: 'none'}) - .translate(.5, 1) - - - circleSel.at({fill: d => d.isSick ? colors.sick : colors.well}) - - datum.update = () => { - // rectSel.at({ - // // fill: d => rColors[d.grade > d.threshold][opacity(d)], - // // strokeWidth: d => opacity(d) ? 1 : .1, - // }) - - // circleSel.at({fill: d => cColors[d.isSick][opacity(d)]}) - - var byType = d3.nestBy(topology.objects.boxes.geometries, d => opacity(d.properties.d)) - - byType.forEach(type => { - var obj = {type: 'GeometryCollection', geometries: type} - var pathStr = path(topojson.mesh(topology, obj, (a, b) => a == b)) - - var pathSel = type.key == 'true' ? 
includedPath : hiddenPath - pathSel.at({d: pathStr}) - }) - - var sickBoxes = topology.objects.boxes.geometries - .filter(d => d.properties.d.grade <= d.properties.d.threshold) - var obj = {type: 'GeometryCollection', geometries: sickBoxes} - var pathStr = path(topojson.mesh(topology, obj, (a, b) => a == b)) - callWellPath.at({d: pathStr}) - } - } - - } - - - - function updateAll(){ - miniSel.each(d => d.update()) - } - - return {updateAll} -} - - - - - - - - - -if (window.init) window.init() diff --git a/spaces/merve/data-leak/source/uncertainty-calibration/graph-scroll.css b/spaces/merve/data-leak/source/uncertainty-calibration/graph-scroll.css deleted file mode 100644 index 2090579822fcb774883d54187371bc4a3440a395..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/uncertainty-calibration/graph-scroll.css +++ /dev/null @@ -1,129 +0,0 @@ -#container{ - position: relative; - width: auto; - } - - #sections{ - width: 340px; - } - - #sections > div{ - background: white; - opacity: .2; - margin-bottom: 200px; - line-height: 1.4em; - transition: opacity .2s; - } - #sections > div:first-child{ - opacity: 1; - } - #sections > div:last-child{ - /*padding-bottom: 80vh;*/ - padding-bottom: 80px; - margin-bottom: 0px; - } - #sections > div:first-child > h1{ - padding-top: 40px; - } - - #sections > div.graph-scroll-active{ - opacity: 1; - } - - #graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 800px; - font-family: sans-serif; - - } - - .slider{ - font-family: 'Google Sans', sans-serif; - } - - #sections h1{ - text-align: left !important; - } - - @media (max-width: 1000px) and (min-width: 926px){ - #sections{ - margin-left: 20px; - } - } - - @media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - h1{ - margin-bottom: 0px; - } - - - #graph{ - width: 100%; - margin-left: 10px; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - top: 0px; - } - #sections{ - width: auto; - position: relative; - margin: 0px auto; - pointer-events: none; - } - #sections a{ - pointer-events: all; - } - - #sections > div{ - background: rgba(255,255,255,.9); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - .mini, .slider, i, .gated{ - margin: 0px auto; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -500px; - } - - #sections > div:last-child{ - padding-bottom: 0px; - margin-bottom: 0px; - } - - - #sections h1{ - margin: 10px; - padding-top: 0px !important; - } - - #sections h3{ - margin-top: .5em; - } - - } - \ No newline at end of file diff --git a/spaces/merve/fill-in-the-blank/public/fill-in-the-blank/init-pair.js b/spaces/merve/fill-in-the-blank/public/fill-in-the-blank/init-pair.js deleted file mode 100644 index dbd16d4499ddbcc59234fcdefbf7a5cad6f91a7a..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/fill-in-the-blank/init-pair.js +++ /dev/null @@ -1,360 +0,0 @@ -/* Copyright 2021 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - -window.initPair = function(pair){ - var isMobile = window.innerWidth <= 820 - - var sel = d3.select('.' + pair.class).html('') - .at({role: 'graphics-document', 'aria-label': pair.ariaLabel}) - .on('keydown', function(){ - sel.classed('changed', 1) - if (d3.event.keyCode != 13) return - d3.event.preventDefault() - // return - - pair.str0 = '' - pair.str1 = '' - - updateChart() - }) - - if (!sel.node()) return - - var optionSel = sel.append('div.options') - - var inputRow = optionSel.append('div.flex-row.flex-row-textarea') - var input1Sel = inputRow.append('textarea.input-1') - .st({color: util.colors[1]}).at({cols: 30}) - input1Sel.node().value = pair.s1.replace('[MASK]', '_') - - var input0Sel = inputRow.append('textarea.input-0') - .st({color: util.colors[0]}).at({cols: 30}) - input0Sel.node().value = pair.s0.replace('[MASK]', '_') - - if (isMobile){ - sel.selectAll('textarea').on('change', updateChart) - } - - var countSel = optionSel.append('div') - .append('b').text('Number of Tokens') - .append('info').text('ⓘ').call(addLockedTooltip) - .datum('The scales are set using the top N tokens for each sentence.

      "Likelihoods" will show more than N tokens if a top completion for one sentence is unlikely for the other sentence.') - .parent().parent() - .append('div.flex-row') - .appendMany('div.button', [30, 200, 1000, 5000, 99999]) - .text(d => d > 5000 ? 'All' : d) - .st({textAlign: 'center'}) - .on('click', d => { - pair.count = d - updateChart() - }) - - var typeSel = optionSel.append('div') - .append('b').text('Chart Type') - .append('info').text('ⓘ').call(addLockedTooltip) - .datum('"Likelihoods" shows the logits from both models plotted directly with a shared linear scale.

      To better contrast the outputs, "Differences" shows logitA - logitB on the y-axis and mean(logitA, logitB) on the x-axis with separate linear scales.') - .parent().parent() - .append('div.flex-row') - .appendMany('div.button', ['Likelihoods', 'Differences']) - .text(d => d) - .st({textAlign: 'center'}) - .on('click', d => { - pair.type = d - updateChart() - }) - - var modelSel = optionSel.append('div') - .st({display: pair.model == 'BERT' ? 'none' : ''}) - .append('b').text('Model') - .parent() - .append('div.flex-row') - .appendMany('div.button', ['BERT', 'Zari']) - .text(d => d) - .st({textAlign: 'center'}) - .on('click', d => { - pair.model = d - updateChart() - }) - - // TODO add loading spinner - var updateSel = optionSel - .append('div.flex-row') - .append('div.button.update').on('click', updateChart) - .text('Update') - .st({display: isMobile ? 'none' : ''}) - - var warningSel = optionSel.append('div.warning') - .text('⚠️Some of the text this model was trained on includes harmful stereotypes. This is a tool to uncover these associations—not an endorsement of them.') - - var resetSel = optionSel.append('div.reset') - .html(' Reset') - .on('click', () => { - pair = JSON.parse(pair.pairStr) - pair.pairStr = JSON.stringify(pair) - - input0Sel.node().value = pair.s0 - input1Sel.node().value = pair.s1 - - updateChart(true) - }) - - if (pair.alts){ - d3.select('.' + pair.class + '-alts').html('') - .classed('alt-block', 1).st({display: 'block'}) - .appendMany('span.p-button-link', pair.alts) - .html(d => d.str) - .on('click', d => { - input0Sel.node().value = d.s0 - input1Sel.node().value = d.s1 - - updateChart() - }) - } - - - var margin = {bottom: 50, left: 25, top: 5, right: 20} - var graphSel = sel.append('div.graph') - var totalWidth = graphSel.node().offsetWidth - var width = totalWidth - margin.left - margin.right - - var c = d3.conventions({ - sel: graphSel.append('div').st({marginTop: isMobile ? 20 : -5}), - width, - height: width, - margin, - layers: 'sdds', - }) - - - var nTicks = 4 - var tickScale = d3.scaleLinear().range([0, c.width]) - c.svg.appendMany('path.bg-tick', d3.range(nTicks + 1)) - .at({d: d => `M ${.5 + Math.round(tickScale(d/nTicks))} 0 V ${c.height}`}) - c.svg.appendMany('path.bg-tick', d3.range(nTicks + 1)) - .at({d: d => `M 0 ${.5 + Math.round(tickScale(d/nTicks))} H ${c.width}`}) - - - var annotationSel = c.layers[1].appendMany('div.annotations', pair.annotations) - .translate(d => d.pos) - .html(d => d.str) - .st({color: d => d.color, width: 250, postion: 'absolute'}) - - var scatter = window.initScatter(c) - - updateChart(true) - - - async function updateChart(isFirst){ - sel.classed('changed', 0) - warningSel.st({opacity: isFirst ? 0 : 1}) - resetSel.st({opacity: isFirst ? 0 : 1}) - annotationSel.st({opacity: isFirst ? 1 : 0}) - - countSel.classed('active', d => d == pair.count) - typeSel.classed('active', d => d == pair.type) - modelSel.classed('active', d => d == pair.model) - - function getStr(sel){ - return sel.node().value.replace('_', '[MASK]') - } - - var modelPath = pair.model == 'Zari' ? 
'embed_zari_cda' : 'embed' - - pair.s0 = input0Sel.node().value.replace('_', '[MASK]') - pair.s1 = input1Sel.node().value.replace('_', '[MASK]') - - updateSel.classed('loading', 1) - var vals0 = await post(modelPath, {sentence: pair.s0}) - var vals1 = await post(modelPath, {sentence: pair.s1}) - updateSel.classed('loading', 0) - - - var allTokens = vals0.map((v0, i) => { - return {word: tokenizer.vocab[i], v0, i, v1: vals1[i]} - }) - allTokens.forEach(d => { - d.dif = d.v0 - d.v1 - d.meanV = (d.v0 + d.v1) / 2 - d.isVisible = false - }) - - _.sortBy(allTokens, d => -d.v1).forEach((d, i) => d.v1i = i) - _.sortBy(allTokens, d => -d.v0).forEach((d, i) => d.v0i = i) - - var topTokens = allTokens.filter(d => d.v0i <= pair.count || d.v1i <= pair.count) - - - var logitExtent = d3.extent(topTokens.map(d => d.v0).concat(topTokens.map(d => d.v1))) - - var tokens = allTokens - .filter(d => logitExtent[0] <= d.v0 && logitExtent[0] <= d.v1) - - var mag = logitExtent[1] - logitExtent[0] - logitExtent = [logitExtent[0] - mag*.002, logitExtent[1] + mag*.002] - - if (pair.type == 'Differences') tokens = _.sortBy(allTokens, d => -d.meanV).slice(0, pair.count) - - tokens.forEach(d => { - d.isVisible = true - }) - - var maxDif = d3.max(d3.extent(tokens, d => d.dif).map(Math.abs)) - var color = palette(-maxDif*.8, maxDif*.8) - - updateSentenceLabels() - - if (pair.type == 'Likelihoods'){ - drawXY() - } else{ - drawRotated() - } - - sel.classed('is-xy', pair.type == 'Likelihoods') - sel.classed('is-rotate', pair.type != 'Likelihoods') - - - function drawXY(){ - c.x.domain(logitExtent) - c.y.domain(logitExtent) - - d3.drawAxis(c) - - var s = {30: 4, 200: 3, 1000: 3}[pair.count] || 2 - var scatterData = allTokens.map(d => { - var x = c.x(d.v0) - var y = c.y(d.v1) - var fill = color(d.dif) - var dif = d.dif - var word = d.word - var show = '' - var isVisible = d.isVisible - - return {x, y, s, dif, fill, word, show, isVisible} - }) - - var textCandidates = _.sortBy(scatterData.filter(d => d.isVisible), d => d.dif) - d3.nestBy(textCandidates.slice(0, 1000), d => Math.round(d.y/10)) - .forEach(d => d[0].show = 'uf') - d3.nestBy(textCandidates.reverse().slice(0, 1000), d => Math.round(d.y/10)) - .forEach(d => d[0].show = 'lr') - - logitExtent.pair = pair - scatter.draw(c, scatterData, true) - - c.svg.selectAppend('text.x-axis-label.xy-only') - .translate([c.width/2, c.height + 24]) - .text(pair.label0 ? ' __ likelihood, ' + pair.label0 + ' sentence →' : '__ likelihood, sentence two →') - .st({fill: util.colors[0]}) - .at({textAnchor: 'middle'}) - - - c.svg.selectAppend('g.y-axis-label.xy-only') - .translate([c.width + 20, c.height/2]) - .selectAppend('text') - .text(pair.label1 ? 
' __ likelihood, ' + pair.label1 + ' sentence →' : '__ likelihood, sentence one →') - .st({fill: util.colors[1]}) - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - } - - function drawRotated(){ - c.x.domain(d3.extent(tokens, d => d.meanV)) - c.y.domain([maxDif, -maxDif]) - - d3.drawAxis(c) - - var scatterData = allTokens.map(d => { - var x = c.x(d.meanV) - var y = c.y(d.dif) - var fill = color(d.dif) - var word = d.word - var show = '' - var isVisible = d.isVisible - - return {x, y, s: 2, fill, word, show, isVisible} - }) - - scatterData.forEach(d => { - d.dx = d.x - c.width/2 - d.dy = d.y - c.height/2 - }) - - var textCandidates = _.sortBy(scatterData, d => -d.dx*d.dx - d.dy*d.dy) - .filter(d => d.isVisible) - .slice(0, 5000) - d3.nestBy(textCandidates, d => Math.round(12*Math.atan2(d.dx, d.dy))) - .map(d => d[0]) - .forEach(d => d.show = (d.dy < 0 ? 'u' : 'l') + (d.dx < 0 ? 'l' : 'r')) - - scatter.draw(c, scatterData, false) - - c.svg.selectAppend('text.rotate-only.x-axis-label') - .translate([c.width/2, c.height + 24]) - .text('__ likelihood, both sentences →') - .at({textAnchor: 'middle'}) - .st({fill: '#000'}) - - c.svg.selectAll('g.rotate-only.sent-1,g.rotate-only.sent-1').remove() - c.svg.selectAppend('g.rotate-only.sent-1') - .translate([c.width + 20, c.height/2]) - .append('text') - .text(`Higher likelihood, ${pair.label1 ? pair.label1 + ' sentence ' : 'sentence one'} →`) - .at({textAnchor: 'start', transform: 'rotate(-90)', x: 20}) - .st({fill: util.colors[1]}) - - c.svg.selectAppend('g.rotate-only.sent-1') - .translate([c.width + 20, c.height/2 + 0]) - .append('text') - .text(`← Higher likelihood, ${pair.label0 ? pair.label0 + ' sentence ' : 'sentence two'}`) - .at({textAnchor: 'end', transform: 'rotate(-90)', x: -20}) - .st({fill: util.colors[0]}) - } - } - - function updateSentenceLabels(){ - var t0 = tokenizer.tokenize(pair.s0) - var t1 = tokenizer.tokenize(pair.s1) - - var i = 0 - while (t0[i] == t1[i] && i < t0.length) i++ - - var j = 1 - while (t0[t0.length - j] == t1[t1.length - j] && j < t0.length) j++ - - pair.label0 = tokens2origStr(t0, pair.s0) - pair.label1 = tokens2origStr(t1, pair.s1) - - function tokens2origStr(t, s){ - var tokenStr = tokenizer.decode(t.slice(i, -j + 1)).trim() - var lowerStr = s.toLowerCase() - - var startI = lowerStr.indexOf(tokenStr) - return s.slice(startI, startI + tokenStr.length) - } - - if ( - !pair.label0.length || - !pair.label1.length || - pair.label0.length > 15 || - pair.label1.length > 15){ - pair.label0 = '' - pair.label1 = '' - } - - // console.log(i, j, pair.label0, pair.label1) - } -} - -if (window.init) init() diff --git a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/list.css b/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/list.css deleted file mode 100644 index e96290cccb2fafb91f272e4fc6b1245b8bf06764..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/list.css +++ /dev/null @@ -1,46 +0,0 @@ -.list{ - padding: 0px !important; - line-height: 16px; - background: #eee; -} -.list table{ - -webkit-border-vertical-spacing: 0px; - -webkit-border-horizontal-spacing: 0px; - width: 100%; -} -.list tr{ - padding-bottom: 4px !important; -} -.list .header{ - text-align: left; - position: sticky; - top: 0px; - background: #000; - padding-top: 0px; -} -th{ - border-spacing: 0px; - margin-top: px; - padding-top: 0px; - font-weight: 500; - 
color: #fff; - -} - -td.num{ - padding-right: 20px; - color: #999; -} - -tr.sentence{ - color: #333; - scroll-margin-top: 17px -} -tr.sentence.active{ - background: rgba(255,0,255,.1); -} - -tr td{ - border-bottom: 3px solid #eee; -} - diff --git a/spaces/merve/fill-in-the-blank/source/uncertainty-calibration/draw_calibrationcurve.js b/spaces/merve/fill-in-the-blank/source/uncertainty-calibration/draw_calibrationcurve.js deleted file mode 100644 index c7992a7c79b1a5187bc3f267350869904c836626..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/uncertainty-calibration/draw_calibrationcurve.js +++ /dev/null @@ -1,102 +0,0 @@ - -window.drawCalibrationCurve = function (graphSel, fig_height, fig_width){ - var width = Math.min(fig_height, fig_width) - var sel = graphSel - .append('div').st({textAlign: 'center'}) - .append('div').st({display: 'inline-block'}) - - var c = d3.conventions({ - sel, - width, - height: width, - margin: {top: 40} - }); - - c.svg.parent() - - //TODO(nthain) Who owns the buckets? We have at least 2 instances, reduce to 1 - var buckets = d3.pairs(window.weatherGraph.thresholds) - buckets.forEach(bucket => { - bucket.val = d3.mean(bucket, d => d.origVal) - }) - - c.xAxis.tickValues(buckets.map(d => d.val)).tickFormat(d3.format('.2f')) - c.yAxis.tickValues(buckets.map(d => d.val)).tickFormat(d3.format('.2f')) - d3.drawAxis(c) - window.util.ggPlotBg(c) - - window.util.addAxisLabel(c, 'Calibrated Model Score', 'Probability of Rain') - - var eceSel = c.svg.append('g.ece') - var eceBox = eceSel.append('rect.val-box') - .at({width: 55, height: 20, x: c.width/2 + 72.5, y: -35, rx: 3, ry: 3}) - var eceText = eceSel.append('text.big-text') - .at({y: -20, x: c.width/2-30, textAnchor: 'middle'}) - var eceVal = eceSel.append('text.val-text') - .at({y: -20, x: c.width/2+100, textAnchor: 'middle'}) - - c.svg.append('path') - .at({ - d: ['M', 0, c.height, 'L', c.width, 0].join(' '), - stroke: '#555', - strokeDasharray: '3 3', - }) - - var bucketSel = c.svg.appendMany('g.bucket', buckets) - - var circleSel = bucketSel.append('circle') - .at({fillOpacity: .4, fill: 'steelblue'}) - - var pathSel = bucketSel.append('path') - .at({stroke: 'steelblue', strokeWidth: 3}) - - var bucketText = bucketSel.append('text').text('8 / 10') - .at({textAnchor: 'start', dy: '.33em', fontSize: 10, fill: '#000'}) - - - // function remap_score(s) { - // // new_score = min_threshold_new + (old_score-min_threshold_old)(max_threshold_new-min_threshold_new)/(max_threshold_old-min_threshold_old) - // //find index less than score - // } - - function renderBuckets(){ - var filter_rain = window.slides.slide?.filter_rain - - buckets.forEach(bucket => { - bucket.data = weatherdata - .filter(d => bucket[0].val <= d.score && d.score <= bucket[1].val) - .filter(d => !filter_rain || !d.is_filter) - - bucket.nPositive = d3.sum(bucket.data, d => d.label) - bucket.percent = bucket.nPositive/bucket.data.length - - if (isNaN(bucket.percent)) bucket.percent = bucket[0].val - }) - - var ece = d3.sum(buckets, d => d.data.length*Math.abs(d.val - d.percent)) - ece = ece/d3.sum(buckets, d => d.data.length) - - eceText.text('Expected Calibration Error: ') - eceVal.text(d3.format('.3f')(ece)) - - var rScale = d3.scaleSqrt().domain([0, 50]).range([0, 20]) - - bucketSel - .st({opacity: d => d.data.length}) - .filter(d => d.data.length) - .translate(d => [c.x(d.val), c.y(d.percent)]) - - circleSel - .at({r: d => rScale(d.data.length)}) - - pathSel.at({d: d => 'M 0 0 V ' + (c.y(d.val) - c.y(d.percent))}) - - 
bucketText - .text(d => `${d.nPositive} / ${d.data.length}`) - .at({x: d => rScale(d.data.length) + 2}) - } - - return {renderBuckets, c, buckets, calibrationDataFn: () => console.log('test')} -} - -if (window.init) window.init() diff --git a/spaces/mfrashad/CharacterGAN/netdissect/actviz.py b/spaces/mfrashad/CharacterGAN/netdissect/actviz.py deleted file mode 100644 index 060ea13d589544ce936ac7c7bc20cd35194d0ae9..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/netdissect/actviz.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import numpy -from scipy.interpolate import RectBivariateSpline - -def activation_visualization(image, data, level, alpha=0.5, source_shape=None, - crop=False, zoom=None, border=2, negate=False, return_mask=False, - **kwargs): - """ - Makes a visualiztion image of activation data overlaid on the image. - Params: - image The original image. - data The single channel feature map. - alpha The darkening to apply in inactive regions of the image. - level The threshold of activation levels to highlight. - """ - if len(image.shape) == 2: - # Puff up grayscale image to RGB. - image = image[:,:,None] * numpy.array([[[1, 1, 1]]]) - surface = activation_surface(data, target_shape=image.shape[:2], - source_shape=source_shape, **kwargs) - if negate: - surface = -surface - level = -level - if crop: - # crop to source_shape - if source_shape is not None: - ch, cw = ((t - s) // 2 for s, t in zip( - source_shape, image.shape[:2])) - image = image[ch:ch+source_shape[0], cw:cw+source_shape[1]] - surface = surface[ch:ch+source_shape[0], cw:cw+source_shape[1]] - if crop is True: - crop = surface.shape - elif not hasattr(crop, '__len__'): - crop = (crop, crop) - if zoom is not None: - source_rect = best_sub_rect(surface >= level, crop, zoom, - pad=border) - else: - source_rect = (0, surface.shape[0], 0, surface.shape[1]) - image = zoom_image(image, source_rect, crop) - surface = zoom_image(surface, source_rect, crop) - mask = (surface >= level) - # Add a yellow border at the edge of the mask for contrast - result = (mask[:, :, None] * (1 - alpha) + alpha) * image - if border: - edge = mask_border(mask)[:,:,None] - result = numpy.maximum(edge * numpy.array([[[200, 200, 0]]]), result) - if not return_mask: - return result - mask_image = (1 - mask[:, :, None]) * numpy.array( - [[[0, 0, 0, 255 * (1 - alpha)]]], dtype=numpy.uint8) - if border: - mask_image = numpy.maximum(edge * numpy.array([[[200, 200, 0, 255]]]), - mask_image) - return result, mask_image - -def activation_surface(data, target_shape=None, source_shape=None, - scale_offset=None, deg=1, pad=True): - """ - Generates an upsampled activation sample. - Params: - target_shape Shape of the output array. - source_shape The centered shape of the output to match with data - when upscaling. Defaults to the whole target_shape. - scale_offset The amount by which to scale, then offset data - dimensions to end up with target dimensions. A pair of pairs. - deg Degree of interpolation to apply (1 = linear, etc). - pad True to zero-pad the edge instead of doing a funny edge interp. - """ - # Default is that nothing is resized. 
- if target_shape is None: - target_shape = data.shape - # Make a default scale_offset to fill the image if there isn't one - if scale_offset is None: - scale = tuple(float(ts) / ds - for ts, ds in zip(target_shape, data.shape)) - offset = tuple(0.5 * s - 0.5 for s in scale) - else: - scale, offset = (v for v in zip(*scale_offset)) - # Now we adjust offsets to take into account cropping and so on - if source_shape is not None: - offset = tuple(o + (ts - ss) / 2.0 - for o, ss, ts in zip(offset, source_shape, target_shape)) - # Pad the edge with zeros for sensible edge behavior - if pad: - zeropad = numpy.zeros( - (data.shape[0] + 2, data.shape[1] + 2), dtype=data.dtype) - zeropad[1:-1, 1:-1] = data - data = zeropad - offset = tuple((o - s) for o, s in zip(offset, scale)) - # Upsample linearly - ty, tx = (numpy.arange(ts) for ts in target_shape) - sy, sx = (numpy.arange(ss) * s + o - for ss, s, o in zip(data.shape, scale, offset)) - levels = RectBivariateSpline( - sy, sx, data, kx=deg, ky=deg)(ty, tx, grid=True) - # Return the mask. - return levels - -def mask_border(mask, border=2): - """Given a mask computes a border mask""" - from scipy import ndimage - struct = ndimage.generate_binary_structure(2, 2) - erosion = numpy.ones((mask.shape[0] + 10, mask.shape[1] + 10), dtype='int') - erosion[5:5+mask.shape[0], 5:5+mask.shape[1]] = ~mask - for _ in range(border): - erosion = ndimage.binary_erosion(erosion, struct) - return ~mask ^ erosion[5:5+mask.shape[0], 5:5+mask.shape[1]] - -def bounding_rect(mask, pad=0): - """Returns (r, b, l, r) boundaries so that all nonzero pixels in mask - have locations (i, j) with t <= i < b, and l <= j < r.""" - nz = mask.nonzero() - if len(nz[0]) == 0: - # print('no pixels') - return (0, mask.shape[0], 0, mask.shape[1]) - (t, b), (l, r) = [(max(0, p.min() - pad), min(s, p.max() + 1 + pad)) - for p, s in zip(nz, mask.shape)] - return (t, b, l, r) - -def best_sub_rect(mask, shape, max_zoom=None, pad=2): - """Finds the smallest subrectangle containing all the nonzeros of mask, - matching the aspect ratio of shape, and where the zoom-up ratio is no - more than max_zoom""" - t, b, l, r = bounding_rect(mask, pad=pad) - height = max(b - t, int(round(float(shape[0]) * (r - l) / shape[1]))) - if max_zoom is not None: - height = int(max(round(float(shape[0]) / max_zoom), height)) - width = int(round(float(shape[1]) * height / shape[0])) - nt = min(mask.shape[0] - height, max(0, (b + t - height) // 2)) - nb = nt + height - nl = min(mask.shape[1] - width, max(0, (r + l - width) // 2)) - nr = nl + width - return (nt, nb, nl, nr) - -def zoom_image(img, source_rect, target_shape=None): - """Zooms pixels from the source_rect of img to target_shape.""" - import warnings - from scipy.ndimage import zoom - if target_shape is None: - target_shape = img.shape - st, sb, sl, sr = source_rect - source = img[st:sb, sl:sr] - if source.shape == target_shape: - return source - zoom_tuple = tuple(float(t) / s - for t, s in zip(target_shape, source.shape[:2]) - ) + (1,) * (img.ndim - 2) - with warnings.catch_warnings(): - warnings.simplefilter('ignore', UserWarning) # "output shape of zoom" - target = zoom(source, zoom_tuple) - assert target.shape[:2] == target_shape, (target.shape, target_shape) - return target - -def scale_offset(dilations): - if len(dilations) == 0: - return (1, 0) - scale, offset = scale_offset(dilations[1:]) - kernel, stride, padding = dilations[0] - scale *= stride - offset *= stride - offset += (kernel - 1) / 2.0 - padding - return scale, offset - -def 
choose_level(feature_map, percentile=0.8): - ''' - Chooses the top 80% level (or whatever the level chosen). - ''' - data_range = numpy.sort(feature_map.flatten()) - return numpy.interp( - percentile, numpy.linspace(0, 1, len(data_range)), data_range) - -def dilations(modulelist): - result = [] - for module in modulelist: - settings = tuple(getattr(module, n, d) - for n, d in (('kernel_size', 1), ('stride', 1), ('padding', 0))) - settings = (((s, s) if not isinstance(s, tuple) else s) - for s in settings) - if settings != ((1, 1), (1, 1), (0, 0)): - result.append(zip(*settings)) - return zip(*result) - -def grid_scale_offset(modulelist): - '''Returns (yscale, yoffset), (xscale, xoffset) given a list of modules''' - return tuple(scale_offset(d) for d in dilations(modulelist)) - diff --git a/spaces/michelecafagna26/High-Level-Dataset-explorer/README.md b/spaces/michelecafagna26/High-Level-Dataset-explorer/README.md deleted file mode 100644 index d8ff8c0dffbda3e5a1655cbb1f39869a466fc197..0000000000000000000000000000000000000000 --- a/spaces/michelecafagna26/High-Level-Dataset-explorer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: High Level Dataset Explorer -emoji: 💩 -colorFrom: pink -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mike-ravkine/can-ai-code-compare/Dockerfile b/spaces/mike-ravkine/can-ai-code-compare/Dockerfile deleted file mode 100644 index fc7904c40a709fe893dff3e9f64783e131e0dc3b..0000000000000000000000000000000000000000 --- a/spaces/mike-ravkine/can-ai-code-compare/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN git clone https://github.com/the-crypt-keeper/can-ai-code.git /code/can_ai_code - -WORKDIR /code/can_ai_code - -CMD ["streamlit", "run", "compare-app.py", "--server.address", "0.0.0.0", "--server.port", "7860"] diff --git a/spaces/mshukor/UnIVAL/fairseq/scripts/test_fsdp.sh b/spaces/mshukor/UnIVAL/fairseq/scripts/test_fsdp.sh deleted file mode 100644 index 1f428a035e4474427ded991f8e8307ea59f61f69..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/scripts/test_fsdp.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -rm -rf fsdp_dummy -mkdir -p fsdp_dummy -CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train /private/home/sshleifer/data-bin/stories_mmap \ - --ddp-backend fully_sharded --fp16 --fp16-init-scale 4 \ - --cpu-offload --checkpoint-activations \ - --task language_modeling --tokens-per-sample 256 --batch-size 8 \ - --arch transformer_lm_gpt2_tiny \ - --optimizer cpu_adam --adam-betas "(0.9,0.98)" \ - --lr 0.0001 --lr-scheduler polynomial_decay --warmup-updates 5 --total-num-update 10 \ - --max-update 5 --log-format json --log-interval 1 \ - --save-interval-updates 5 --save-dir fsdp_dummy --disable-validation \ - --restore-file x.pt "$@" - -# Now we try to load the checkpoint -CUDA_VISIBLE_DEVICES=0,1 fairseq-train /private/home/sshleifer/data-bin/stories_mmap \ - --ddp-backend fully_sharded --fp16 --fp16-init-scale 4 \ - --cpu-offload --checkpoint-activations \ - --task language_modeling --tokens-per-sample 256 --batch-size 8 \ - --arch transformer_lm_gpt2_tiny \ - --optimizer cpu_adam --adam-betas "(0.9,0.98)" \ - --lr 0.0001 --lr-scheduler polynomial_decay --warmup-updates 5 --total-num-update 10 
\ - --max-update 2 --log-format json --log-interval 1 \ - --save-interval-updates 2 --save-dir fsdp_dummy diff --git a/spaces/msmilauer/AutoGPT-duplicated2/benchmark/__init__.py b/spaces/msmilauer/AutoGPT-duplicated2/benchmark/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder_train.py b/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder_train.py deleted file mode 100644 index d712ffa3e6c92a091aa18dc90f0027f46940e400..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder_train.py +++ /dev/null @@ -1,56 +0,0 @@ -from utils.argutils import print_args -from vocoder.train import train -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Trains the vocoder from the synthesizer audios and the GTA synthesized mels, " - "or ground truth mels.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument("run_id", type=str, help= \ - "Name for this model instance. If a model state from the same run ID was previously " - "saved, the training will restart from there. Pass -f to overwrite saved states and " - "restart from scratch.") - parser.add_argument("datasets_root", type=str, help= \ - "Path to the directory containing your SV2TTS directory. Specifying --syn_dir or --voc_dir " - "will take priority over this argument.") - parser.add_argument("--syn_dir", type=str, default=argparse.SUPPRESS, help= \ - "Path to the synthesizer directory that contains the ground truth mel spectrograms, " - "the wavs and the embeds. Defaults to /SV2TTS/synthesizer/.") - parser.add_argument("--voc_dir", type=str, default=argparse.SUPPRESS, help= \ - "Path to the vocoder directory that contains the GTA synthesized mel spectrograms. " - "Defaults to /SV2TTS/vocoder/. Unused if --ground_truth is passed.") - parser.add_argument("-m", "--models_dir", type=str, default="vocoder/saved_models/", help=\ - "Path to the directory that will contain the saved model weights, as well as backups " - "of those weights and wavs generated during training.") - parser.add_argument("-g", "--ground_truth", action="store_true", help= \ - "Train on ground truth spectrograms (/SV2TTS/synthesizer/mels).") - parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ - "Number of steps between updates of the model on the disk. Set to 0 to never save the " - "model.") - parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ - "Number of steps between backups of the model. 
Set to 0 to never make backups of the " - "model.") - parser.add_argument("-f", "--force_restart", action="store_true", help= \ - "Do not load any saved model and restart from scratch.") - args = parser.parse_args() - - # Process the arguments - if not hasattr(args, "syn_dir"): - args.syn_dir = Path(args.datasets_root, "SV2TTS", "synthesizer") - args.syn_dir = Path(args.syn_dir) - if not hasattr(args, "voc_dir"): - args.voc_dir = Path(args.datasets_root, "SV2TTS", "vocoder") - args.voc_dir = Path(args.voc_dir) - del args.datasets_root - args.models_dir = Path(args.models_dir) - args.models_dir.mkdir(exist_ok=True) - - # Run the training - print_args(args, parser) - train(**vars(args)) - \ No newline at end of file diff --git a/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/resume.py b/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/resume.py deleted file mode 100644 index b21731c979a121ab8227280351b70d6062efd983..0000000000000000000000000000000000000000 --- a/spaces/nakamura196/yolov5-ndl-layout/ultralytics/yolov5/utils/aws/resume.py +++ /dev/null @@ -1,40 +0,0 @@ -# Resume all interrupted trainings in yolov5/ dir including DDP trainings -# Usage: $ python utils/aws/resume.py - -import os -import sys -from pathlib import Path - -import torch -import yaml - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[2] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -port = 0 # --master_port -path = Path('').resolve() -for last in path.rglob('*/**/last.pt'): - ckpt = torch.load(last) - if ckpt['optimizer'] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: - opt = yaml.safe_load(f) - - # Get device count - d = opt['device'].split(',') # devices - nd = len(d) # number of devices - ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' - else: # single-GPU - cmd = f'python train.py --resume {last}' - - cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) diff --git a/spaces/nateraw/yolov6/README.md b/spaces/nateraw/yolov6/README.md deleted file mode 100644 index 2d3bd2e2dac886bc0c6024e0b1c33cdcf3795ac1..0000000000000000000000000000000000000000 --- a/spaces/nateraw/yolov6/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Yolov6 -emoji: 🔥😎🔥 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.1.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Brorsoft Mxf Converter Keygen [TOP] Software.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Brorsoft Mxf Converter Keygen [TOP] Software.md deleted file mode 100644 index ce0851ae26a2d6c376d2d175c6d4ae221e3194e5..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Brorsoft Mxf Converter Keygen [TOP] Software.md +++ /dev/null @@ -1,85 +0,0 @@ - -

      Brorsoft MXF Converter Keygen Software: A Review

      -

      If you are looking for a way to convert your MXF videos to other popular formats, you may have come across Brorsoft MXF Converter, a powerful and professional video converter that can handle MXF files from various sources. But what if you don't want to pay for the full version of this software? Is there a way to get it for free? In this article, we will review Brorsoft MXF Converter keygen software, a tool that claims to generate a serial number for activating Brorsoft MXF Converter. We will also discuss the pros and cons of using this tool, and some alternatives that you can consider.

      -

      What is Brorsoft MXF Converter?

      -

Brorsoft MXF Converter is a program for converting MXF videos. It is handy for users with cameras such as the Panasonic P2HD, Canon XF, and Sony XDCAM HD that shoot video in MXF format. You can use this software to convert your MXF videos to various formats, such as MP4, MOV, AVI, WMV, MKV, and FLV. You can also edit your videos before converting them (trimming, cropping, adding effects, subtitles, or watermarks) and adjust video parameters such as resolution, bitrate, and frame rate to suit your needs.

      -

      brorsoft mxf converter keygen software


      Download File ✑ ✑ ✑ https://urlcod.com/2uIcfj



      -

      Features and benefits of Brorsoft MXF Converter

      -

      Some of the features and benefits of Brorsoft MXF Converter are:

      -
        -
      • It supports converting MXF files from different sources, such as Panasonic P2 camcorders (AG-HPX255/HPX250/HPX370/HPX500 etc), Canon XF series cameras (XF100/XF105/XF300/XF305 etc), Sony XDCAM series cameras (PMW-EX1/EX3/EX1R/EX3R/F3 etc), and other devices that record MXF videos.
      • -
      • It supports converting MXF files to various output formats, such as MP4, MOV, AVI, WMV, MKV, FLV, etc. You can also choose from different presets for devices like iPhone, iPad, Android phones and tablets, game consoles like PS4 and Xbox One, smart TVs like Samsung TV and LG TV, etc.
      • -
      • It supports editing your MXF videos before converting them. You can trim the unwanted parts of your videos, crop the black bars or unwanted areas of your videos, add effects like brightness, contrast, saturation, etc. to enhance your videos, add subtitles or watermarks to personalize your videos, etc.
      • -
      • It supports adjusting the video parameters of your output files. You can change the resolution, bitrate, frame rate, aspect ratio, audio codec, audio bitrate, audio channels, etc. of your output files to optimize them for your devices or platforms.
      • -
      • It supports batch conversion and fast conversion speed. You can convert multiple MXF files at once with this software. It also uses advanced technology to accelerate the conversion process and save your time.
      • -
      -

      Supported input and output formats of Brorsoft MXF Converter

      -

      The following table shows the supported input and output formats of Brorsoft MXF Converter:

      - - - - - - - - - - - - - - - - - - - - - -
| Input Formats | Output Formats |
| --- | --- |
| MXF, P2 MXF, XAVC MXF, etc. | MP4, MOV, AVI, WMV, MKV, FLV, etc. |
| | iPhone, iPad, Android, PS4, Xbox One, Samsung TV, LG TV, etc. (device presets) |
| | ProRes, DNxHD, AIC, etc. (editing codecs) |
| | MP3, WAV, WMA, AAC, AC3, etc. (audio) |
      -

What is keygen software?

      -

Keygen software is a tool that generates a serial number or license key for activating a software program. It is typically used by people who want to use a program without paying for it. Keygens are usually created and distributed by crackers who break a program's licensing protection, and they circulate on websites and peer-to-peer networks.

      -

How does keygen software work?

      -

Keygen software works by mimicking the algorithm that the target program uses to generate and verify a serial number or license key. It tries to produce a key that passes the program's verification check and unlocks its full features. Some keygens also modify the program's files or registry entries to bypass the verification process altogether.

      -

      Risks and drawbacks of using a keygen software

      -

      Some of the risks and drawbacks of using a keygen software are:

      -
        -
      • It is illegal and unethical to use a keygen software. It violates the intellectual property rights of the software developer and may result in legal consequences or penalties. It also deprives the software developer of their deserved revenue and discourages them from creating more quality products.
      • -
      • It is unsafe and risky to use a keygen software. It may contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. It may also expose your computer to hackers or cybercriminals who can access your files or data. It may also cause instability or compatibility issues with your system or other programs.
      • -
      • It is unreliable and ineffective to use a keygen software. It may not work properly or at all with the software program. It may fail to generate a valid serial number or a license key or it may generate an invalid or expired one. It may also be detected and blocked by the software program or its updates. It may also cause errors or bugs in the software program or its functions.
      • -
      -

      How to use Brorsoft MXF Converter keygen software?

      -

      If you still want to use Brorsoft MXF Converter keygen software despite the risks and drawbacks mentioned above, you can follow these steps:

      -

      Download and install Brorsoft MXF Converter

      -

      You can download Brorsoft MXF Converter from its official website or from other sources on the internet. You can then install it on your computer by following the instructions on the screen. You can choose to install it as a trial version or as a full version with a fake serial number.

      -

      -

      Download and run Brorsoft MXF Converter keygen software

      -

      You can download Brorsoft MXF Converter keygen software from various websites or forums that offer it. You should be careful and cautious when downloading it as it may contain viruses or malware that can harm your computer. You should also scan it with an antivirus program before running it. You can then run it on your computer by double-clicking on it or by right-clicking on it and choosing "Run as administrator".

      -

      Generate and enter the serial number

      -

      You can generate a serial number for Brorsoft MXF Converter by clicking on the "Generate" button on the keygen software. You should copy the serial number and paste it into the registration window of Brorsoft MXF Converter. You should also enter your name and email address in the corresponding fields. You should then click on the "Register" button to activate Brorsoft MXF Converter.

      -

      Convert your MXF videos with Brorsoft MXF Converter

      -

      You can now use Brorsoft MXF Converter to convert your MXF videos to other formats. You can add your MXF videos to the program by clicking on the "Add" button or by dragging and dropping them into the main interface. You can then choose the output format that you want from the "Format" drop-down menu. You can also click on the "Settings" button to adjust the video parameters of your output files. You can also edit your MXF videos by clicking on the "Edit" button and using the built-in video editor. You can then click on the "Convert" button to start the conversion process. You can check the progress and status of the conversion on the main interface. You can also choose to shut down your computer or open the output folder after the conversion is done.

      -

      Alternatives to Brorsoft MXF Converter keygen software

      -

      If you are not comfortable or satisfied with using Brorsoft MXF Converter keygen software, you can consider some alternatives that are more legal, safe, and reliable. Here are some of them:

      -

      Buy the official license of Brorsoft MXF Converter

      -

      The best and most recommended alternative to Brorsoft MXF Converter keygen software is to buy the official license of Brorsoft MXF Converter from its official website. You can choose from different plans and prices that suit your budget and needs. You can also enjoy free updates, technical support, and customer service from the developer. You can also avoid any legal, security, or performance issues that may arise from using a keygen software.

      -

      Use other free or paid MXF converters

      -

      Another alternative to Brorsoft MXF Converter keygen software is to use other free or paid MXF converters that are available on the internet. You can search for them online and compare their features, compatibility, quality, speed, and reviews. Some examples of free MXF converters are VLC Media Player, HandBrake, FFmpeg, etc. Some examples of paid MXF converters are Wondershare UniConverter, Aiseesoft MXF Converter, Pavtube MXF Converter, etc.
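To make the free route concrete, FFmpeg on its own can handle a typical MXF-to-MP4 conversion. The snippet below is a minimal sketch, not Brorsoft's own method: the input and output file names are hypothetical, and it assumes the ffmpeg binary is installed and available on your PATH.

```python
import subprocess

# Re-encode an MXF recording to H.264 video and AAC audio in an MP4 container.
# -crf controls quality (lower = better quality, larger file);
# -preset trades encoding speed against compression efficiency.
subprocess.run(
    [
        "ffmpeg",
        "-i", "input.mxf",                      # hypothetical source file
        "-c:v", "libx264", "-crf", "20", "-preset", "medium",
        "-c:a", "aac", "-b:a", "192k",
        "output.mp4",                           # hypothetical output file
    ],
    check=True,                                 # raise an error if ffmpeg fails
)
```

Graphical tools such as HandBrake essentially wrap this same kind of re-encode in a point-and-click interface.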

      -

      Conclusion

      -

      In this article, we have reviewed Brorsoft MXF Converter keygen software, a tool that claims to generate a serial number for activating Brorsoft MXF Converter. We have also discussed the pros and cons of using this tool, and some alternatives that you can consider. We hope that this article has helped you understand more about Brorsoft MXF Converter keygen software and make an informed decision.

      -

      FAQs

      -

      Here are some frequently asked questions about Brorsoft MXF Converter keygen software:

      -
        -
      • What is MXF?
        -MXF stands for Material eXchange Format, a file format for storing and exchanging video, audio, and metadata. It is commonly used by professional camcorders and cameras that record high-quality videos.
      • -
      • Why do I need to convert MXF files?
        -You may need to convert MXF files because they are not compatible with most media players, devices, or platforms. You may also want to convert them to reduce their file size, improve their quality, or edit them.
      • -
      • Is Brorsoft MXF Converter keygen software safe to use?
        -No, Brorsoft MXF Converter keygen software is not safe to use. It may contain viruses or malware that can harm your computer or steal your personal information. It may also expose your computer to hackers or cybercriminals who can access your files or data. It may also cause instability or compatibility issues with your system or other programs.
      • -
      • Is Brorsoft MXF Converter keygen software legal to use?
        -No, Brorsoft MXF Converter keygen software is not legal to use. It violates the intellectual property rights of the software developer and may result in legal consequences or penalties. It also deprives the software developer of their deserved revenue and discourages them from creating more quality products.
      • -
      • Where can I get Brorsoft MXF Converter keygen software?
        -You can get Brorsoft MXF Converter keygen software from various websites or forums that offer it. However, we do not recommend you to do so as it is illegal, unsafe, and unreliable to use.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail.md deleted file mode 100644 index 525390772db7dc53a2597f43e4967a91c34520e7..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail.md +++ /dev/null @@ -1,28 +0,0 @@ - -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail: A Powerful Set of User Interface Components for Windows Developers

      -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail is a collection of 11 components that provide Windows developers with a full range of highly customizable user interface elements, such as docking panes, ribbons, toolbars, menus, calendars, charts, grids, and more. These components can help you create professional applications that have a modern and attractive appearance, similar to Visual Studio, Office, and Explorer.

      -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail


      Download File 🗸 https://urlcod.com/2uI9xU



      -

      Some of the features of Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail are:

      -
        -
      • Calendar: A sophisticated Outlook-style calendar and date selection component that supports multiple views, appointments, reminders, and holidays.
      • -
      • Chart: A versatile chart component that can create any charting style with only a few lines of code, such as bar, line, pie, area, bubble, and candlestick charts.
      • -
      • Command Bars: A comprehensive component that includes customizable menus, toolbars, and ribbons that can be docked, floated, or hidden.
      • -
      • Controls: A set of ready-to-use components that have been thoroughly designed and tested, such as buttons, edit controls, list boxes, combo boxes, tree controls, and more.
      • -
      • Docking Pane: A component that allows you to create tear-off tabs and auto-hide windows that can be docked to any side of the application workspace.
      • -
      • Property Grid: A component that provides an easy way to create a hierarchical list of editable properties and represent any data type or sub-item.
      • -
      • Grid Control: A component that includes an easy-to-create Office-style report that can group and sort data in a flat or hierarchical format.
      • -
      • Shortcut Bar: A component that has several options to choose from, such as expandable navigation bar and list, client pane, and style themes.
      • -
      • Skin Framework: A component that incorporates an application skinning framework technology that can be used with Windows themes.
      • -
      • Syntax Edit: A component that provides users with a highly sophisticated text editor control that supports syntax colorization and features.
      • -
      • Task Panel: A component that is an Office task panel similar to what is seen in Microsoft Office, Explorer, and Visual Studio toolbox.
      • -
      -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail supports a wide range of platforms and environments, such as Visual Studio 2022/2019/2017/2015/2013/2012/2010/2008/2005/2003/2002, Visual Basic 6.0, Microsoft Access, Windows 11/10/8.1/8/7/Vista/XP/Me/2000/98 SE/98/95, Windows Server 2022/2019/2016/2012 R2/2012/2008 R2/2008/2003 R2.[^1^] [^2^]

      -

      If you are looking for a robust set of components that can enhance your Windows applications with a full set of highly customizable user interface elements, you should consider Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail as a reliable and affordable solution.

      -

      - -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail is easy to use and can be integrated into your project with minimal code. You can customize the appearance and behavior of the components using various properties, methods, and events. You can also use the built-in designers and editors to visually design your user interface at design time or runtime.

      -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail comes with a comprehensive documentation and sample code that demonstrate how to use the components in various scenarios. You can also access the online support forum and ticket system to get help from the Codejock team and other users.

      -

      Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail is a cost-effective solution that offers a lot of value for your money. You can purchase a single developer license for $699 or a four developer license for $2097. You can also get a 30-day trial version for free to evaluate the product before buying it.

      -

      If you want to create professional applications that incorporate a full set of highly customizable user interface components, you should give Codejock Xtreme Suite Pro ActiveX 15.3.1 Retail a try. You will be impressed by the quality and performance of the components and the ease of use and flexibility of the product.

      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Scanxl Professional 350 License Keyrar.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Scanxl Professional 350 License Keyrar.md deleted file mode 100644 index c3177e7e6d8269e4db7043796f3469628ea11171..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Scanxl Professional 350 License Keyrar.md +++ /dev/null @@ -1,113 +0,0 @@ -
      -

      Scanxl Professional 350 License Keyrar: A Comprehensive Review

      -

      If you own a car, you know how important it is to keep it in good condition and avoid any problems that may affect its performance, safety, or fuel efficiency. However, sometimes you may encounter some issues that are not easy to diagnose or fix by yourself. That's where Scanxl Professional 350 comes in handy.

      -

Scanxl Professional 350 is a software tool that lets you connect your computer to your car's onboard diagnostic system (OBD-II) using an ELM or J2534 interface. With this software, you can access various data from your car's sensors, perform diagnostic tests, clear trouble codes, and more. You can also view, graph, record, and play back data streams in real time or save them for later analysis.
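To give a sense of what happens under the hood, the sketch below shows roughly what querying a car over an ELM-style OBD-II adapter looks like in code. It uses the open-source python-OBD library rather than Scanxl itself, and it assumes a compatible adapter is already plugged into the car's OBD-II port.

```python
import obd  # open-source python-OBD library (not part of Scanxl)

# Auto-detect the adapter's serial port and the vehicle's OBD-II protocol.
connection = obd.OBD()

# Query a few live sensor values (standard OBD-II parameter IDs).
rpm = connection.query(obd.commands.RPM)
coolant = connection.query(obd.commands.COOLANT_TEMP)
print("Engine speed:", rpm.value)
print("Coolant temperature:", coolant.value)

# Read any stored diagnostic trouble codes (DTCs).
dtcs = connection.query(obd.commands.GET_DTC)
print("Trouble codes:", dtcs.value)
```

Scanxl layers a graphical dashboard, logging, and manufacturer-specific parameters on top of this kind of query loop.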

      -

      Scanxl Professional 350 License Keyrar


      Download File »»» https://urlcod.com/2uIcA7



      -

      In this article, we will review Scanxl Professional 350 in detail and show you its features, benefits, installation process, usage tips, pros and cons, and FAQs. By the end of this article, you will have a clear idea of what this software can do for you and whether it is worth buying or not.

      -

      Features and Benefits of Scanxl Professional 350

      -

      Scanxl Professional 350 is a powerful and versatile software that offers many features and benefits for car owners, enthusiasts, mechanics, and technicians. Here are some of them:

      -
        -
      • Supports all ELM compatible and J2534 compliant OBD-II interfaces: You can use Scanxl Professional 350 with any interface that supports ELM or J2534 protocols. This means you can use it with most generic or branded interfaces available on the market. You can also use it with wireless interfaces such as Bluetooth or WiFi.
      • -
      • Provides access to manufacturer-specific and enhanced data: Unlike some basic OBD-II software that only show generic data, Scanxl Professional 350 can access manufacturer-specific and enhanced data that are unique to each vehicle make and model. This means you can get more accurate and detailed information about your car's condition and performance.
      • -
      • Allows you to view, graph, record, and playback data streams: Scanxl Professional 350 lets you monitor various data items from your car's sensors in real-time. You can view them in numerical, graphical, or dashboard formats. You can also record and save the data streams for later analysis or comparison. You can playback the recorded data and see how they change over time or under different driving conditions.
      • -
      • Supports custom sensors, dashboards, and virtual gauges: Scanxl Professional 350 allows you to create your own custom sensors, dashboards, and virtual gauges using the built-in editor. You can define the parameters, units, formulas, ranges, colors, and styles of your custom items. You can also import or export them to share with other users.
      • -
      • Includes built-in generic and enhanced DTC databases: Scanxl Professional 350 includes comprehensive databases of generic and enhanced diagnostic trouble codes (DTCs) that cover most vehicle makes and models. You can easily scan your car for any DTCs and see their definitions, causes, symptoms, and possible solutions. You can also clear the DTCs and reset the check engine light if needed.
      • -
      • Enables you to perform bi-directional controls and tests: Scanxl Professional 350 enables you to perform bi-directional controls and tests on your car's systems and components. This means you can send commands or requests to your car's ECU and see how it responds. For example, you can test the fuel injectors, oxygen sensors, idle speed, throttle position, etc.
      • -
      -

      How to Install and Activate Scanxl Professional 350

      -

      Installing and activating Scanxl Professional 350 is easy and straightforward. Here are the steps you need to follow:

      -
        -
      1. Download the software from the official website or a trusted source: You can download Scanxl Professional 350 from the official website of Palmer Performance Engineering, the developer of the software. Alternatively, you can download it from a trusted source that provides a valid license key or a crack or patch to activate the software.
      2. -
      3. Extract the RAR file using a password if required: After downloading the software, you will get a RAR file that contains the setup file and other files. You may need to use a password to extract the RAR file. The password may be provided by the source where you downloaded the software from.
      4. -
      5. Run the setup file and follow the instructions: Once you extract the RAR file, you will see a setup file named ScanXL_Setup.exe. Run this file and follow the instructions on the screen to install the software on your computer. You may need to agree to the terms and conditions and choose a destination folder for the installation.
      6. -
      7. Enter the license key when prompted or use a crack or patch if available: During the installation process, you may be asked to enter a license key to activate the software. If you have a valid license key, enter it in the box and click Next. If you do not have a license key, you may need to use a crack or patch that is provided by the source where you downloaded the software from. A crack or patch is a file that modifies or replaces some files in the software folder to bypass the activation process. Follow the instructions on how to use the crack or patch if available.
      8. -
      9. Restart your computer and connect your interface to your vehicle: After completing the installation and activation process, restart your computer to make sure the software works properly. Then, connect your ELM or J2534 interface to your computer using a USB cable or a wireless connection. Connect the other end of your interface to your vehicle's OBD-II port, which is usually located under the dashboard near the steering wheel.
      10. -
      -

      How to Use Scanxl Professional 350

      -

      Using Scanxl Professional 350 is simple and intuitive. Here are some basic steps you need to follow:

      -
        -
      1. Launch the software and select your interface and protocol: After connecting your interface to your computer and vehicle, launch Scanxl Professional 350 from your desktop or start menu. The software will automatically detect your interface type and protocol. If not, you can manually select them from the drop-down menus on the top left corner of the main window.
      2. -
      3. Choose the vehicle make, model, year, and engine type: After selecting your interface and protocol, you need to choose the vehicle make, model, year, and engine type from the drop-down menus on the top right corner of the main window. This will help the software to access the correct data from your vehicle's ECU.
      4. -
      5. Select the data items you want to monitor or scan: After choosing your vehicle information, you need to select the data items you want to monitor or scan from the list on the left side of the main window. You can choose from generic or enhanced data items, or custom sensors if you have created any. You can also use the search box to find a specific data item by name or description.
      6. -
      7. View the data in real-time or save it for later analysis: After selecting the data items, you can view them in real-time on the right side of the main window. You can switch between different views such as numerical, graphical, or dashboard. You can also adjust the settings such as units, scales, colors, and styles of each view. You can also record and save the data streams for later analysis or comparison by clicking on the record button on the bottom left corner of the main window.
      8. -
      9. Perform diagnostic tests and clear trouble codes if needed: After viewing the data, you can perform diagnostic tests and clear trouble codes if needed by clicking on the scan button on the bottom right corner of the main window. This will open a new window where you can see all the DTCs that are stored in your vehicle's ECU. You can see their definitions, causes, symptoms, and possible solutions by clicking on each code. You can also clear the DTCs and reset the check engine light by clicking on the clear button.
      10. -
      -

      Pros and Cons of Scanxl Professional 350

      -

      Scanxl Professional 350 is a great software that can help you diagnose and monitor your vehicle's performance. However, like any software, it has its pros and cons. Here are some of them:

      -

      - - - - - - - - - -
Pros
        -
      • Easy to use: Scanxl Professional 350 has a user-friendly interface that makes it easy to navigate and operate. You can easily select your interface, vehicle, data items, views, and tests with a few clicks.
      • -
      • Powerful: Scanxl Professional 350 is a powerful software that can access various data from your vehicle's sensors, perform diagnostic tests, clear trouble codes, and more. You can also view, graph, record, and playback data streams in real-time or save them for later analysis.
      • -
      • Versatile: Scanxl Professional 350 is a versatile software that supports all ELM compatible and J2534 compliant OBD-II interfaces. You can use it with most generic or branded interfaces available on the market. You can also use it with wireless interfaces such as Bluetooth or WiFi.
      • -
      • Compatible: Scanxl Professional 350 is a compatible software that provides access to manufacturer-specific and enhanced data that are unique to each vehicle make and model. This means you can get more accurate and detailed information about your car's condition and performance.
      • -
      • Affordable: Scanxl Professional 350 is an affordable software that costs only $169.95 for a single license. You can also get a free trial version that lasts for 30 days. Compared to other similar software that may cost hundreds or thousands of dollars, Scanxl Professional 350 is a great value for money.
      • -
        -
Cons

• Requires an interface: Scanxl Professional 350 requires an ELM or J2534 interface to connect your computer to your vehicle's OBD-II port. This means you need to buy an interface separately if you do not have one already. The price of an interface may vary depending on its quality and features.
      • -
      • May not work with some vehicles: Scanxl Professional 350 may not work with some vehicles that have different or proprietary OBD-II protocols or systems. This means you may not be able to access some data or functions from your vehicle's ECU. You may need to check the compatibility list of Scanxl Professional 350 before buying it.
      • -
      • May need updates: Scanxl Professional 350 may need updates to keep up with the latest changes and improvements in OBD-II technology and standards. This means you may need to download and install new versions of the software periodically to ensure its functionality and accuracy.
      • -
      -

      Conclusion and Recommendations

      -

      In conclusion, Scanxl Professional 350 is a software that allows you to diagnose and monitor your vehicle's performance using an ELM or J2534 interface. It has many features and benefits such as supporting all ELM compatible and J2534 compliant OBD-II interfaces, providing access to manufacturer-specific and enhanced data, allowing you to view, graph, record, and playback data streams, supporting custom sensors, dashboards, and virtual gauges, including built-in generic and enhanced DTC databases, and enabling you to perform bi-directional controls and tests.

      -

      However, Scanxl Professional 350 also has some drawbacks such as requiring an interface, may not work with some vehicles, and may need updates. Therefore, you need to weigh the pros and cons of this software before buying it.

      -

      In our opinion, Scanxl Professional 350 is a worthwhile investment for anyone who wants to diagnose and monitor their vehicle's performance. It is easy to use, powerful, versatile, compatible, and affordable. It can help you save time, money, and hassle by avoiding unnecessary trips to the mechanic or the dealer. It can also help you improve your driving skills, safety, and fuel efficiency by giving you valuable insights into your car's condition and performance.

      -

      Here are some tips and advice for using Scanxl Professional 350 effectively:

      -
        -
      • Make sure you have a compatible ELM or J2534 interface that works with your vehicle's OBD-II port. You can check the compatibility list of Scanxl Professional 350 on the official website or contact the support team if you have any doubts.
      • -
      • Make sure you have a reliable and secure source to download the software from. You can use the official website of Palmer Performance Engineering or a trusted source that provides a valid license key or a crack or patch to activate the software.
      • -
      • Make sure you update the software regularly to ensure its functionality and accuracy. You can check for updates on the official website or use the built-in updater in the software.
      • -
      • Make sure you read the user manual and watch the tutorial videos on the official website or on YouTube to learn how to use the software properly. You can also use the help menu in the software or contact the support team if you have any questions or issues.
      • -
      • Make sure you follow the safety precautions and guidelines when using the software. Do not use the software while driving or in a dangerous situation. Do not rely on the software for critical decisions or actions. Do not modify or tamper with your vehicle's ECU or systems without proper knowledge or authorization.
      • -
      -

      FAQs

      -

      Here are some frequently asked questions about Scanxl Professional 350:

      -
        -
      1. What are the system requirements for Scanxl Professional 350?
      2. -

        The system requirements for Scanxl Professional 350 are as follows:

        -
          -
        • Windows XP/Vista/7/8/10 (32-bit or 64-bit)
        • -
        • Pentium III 1 GHz or higher processor
        • -
        • 256 MB of RAM or higher
        • -
        • 50 MB of free hard disk space
        • -
        • An ELM compatible or J2534 compliant OBD-II interface
        • -
        • A USB port or a wireless connection for the interface
        • -
        -
      3. What are the differences between Scanxl Standard and Scanxl Professional?
      4. -

        The differences between Scanxl Standard and Scanxl Professional are as follows:

        -
          -
        • Scanxl Standard is a basic OBD-II software that only supports ELM compatible interfaces and generic data items. It does not support J2534 interfaces, manufacturer-specific and enhanced data items, custom sensors, dashboards, virtual gauges, bi-directional controls and tests, etc.
        • -
        • Scanxl Professional is an advanced OBD-II software that supports both ELM compatible and J2534 compliant interfaces and generic, manufacturer-specific, and enhanced data items. It also supports custom sensors, dashboards, virtual gauges, bi-directional controls and tests, etc.
        • -
        • Scanxl Standard costs $89.95 for a single license while Scanxl Professional costs $169.95 for a single license.
        • -
        -
      5. How can I update Scanxl Professional 350 to the latest version?
      6. -

        You can update Scanxl Professional 350 to the latest version by using one of these methods:

        -
          -
        • You can use the built-in updater in the software by clicking on the update button on the top right corner of the main window. This will check for any available updates online and download them automatically.
        • -
        • You can visit the official website of Palmer Performance Engineering and download the latest version of the software and install it on your computer. You may need to enter your license key again to activate the software.
        • -
        • You can contact the support team of Palmer Performance Engineering and ask them to send you the latest version of the software via email or other means. You may need to provide your license key or proof of purchase to verify your identity.
        • -
        -
      7. How can I contact the support team if I have any issues or questions?
      8. -

        You can contact the support team of Palmer Performance Engineering if you have any issues or questions about Scanxl Professional 350 by using one of these methods:

        -
          -
        • You can use the online form on the official website of Palmer Performance Engineering and submit your query or feedback. You will need to provide your name, email address, subject, and message.
        • -
        • You can send an email to support@palmerperformance.com and describe your issue or question in detail. You may need to attach some screenshots or files to illustrate your problem.
        • -
        • You can call the toll-free number 1-888-628-6274 and speak to a customer service representative. You may need to provide your license key or proof of purchase to verify your identity.
        • -
        -
      9. Where can I find more information and resources on Scanxl Professional 350?
      10. -

        You can find more information and resources on Scanxl Professional 350 by using one of these methods:

        -
          -
        • You can visit the official website of Palmer Performance Engineering and browse through the product page, features, screenshots, videos, testimonials, FAQs, etc.
        • -
        • You can visit the online forum of Palmer Performance Engineering and join the community of Scanxl Professional 350 users. You can ask questions, share tips, exchange ideas, etc.
        • -
        • You can visit the online store of Palmer Performance Engineering and buy additional products, accessories, or services related to Scanxl Professional 350. You can also check the latest offers, discounts, and deals.
        • -
        -

        I hope this article has helped you understand Scanxl Professional 350 better and decide whether it is suitable for you or not. If you have any comments or suggestions, please feel free to leave them below. Thank you for reading!

        -
        -
        \ No newline at end of file diff --git a/spaces/ngxson/poet-cat/frontend/styles/chat.css b/spaces/ngxson/poet-cat/frontend/styles/chat.css deleted file mode 100644 index 0056c23be0c81a96c34639f1ade6e1b0a187e89d..0000000000000000000000000000000000000000 --- a/spaces/ngxson/poet-cat/frontend/styles/chat.css +++ /dev/null @@ -1,163 +0,0 @@ -body { - background: #fff; - margin-top: 10px; -} - -.chat-box { - height: 100%; - width: 100%; - background-color: #fff; - overflow: hidden; -} - -.chats { - padding: 30px 15px -} - -.chat-avatar { - float: right -} - -.chat-avatar .avatar { - width: 30px; - -webkit-box-shadow: 0 2px 2px 0 rgba(0, 0, 0, 0.2), 0 6px 10px 0 rgba(0, 0, 0, 0.3); - box-shadow: 0 2px 2px 0 rgba(0, 0, 0, 0.2), 0 6px 10px 0 rgba(0, 0, 0, 0.3); -} - -.chat-body { - display: block; - margin: 10px 30px 0 0; - overflow: hidden -} - -.chat.right .chat-body { - margin: 10px 0 0 0; -} - -.chat-body:first-child { - margin-top: 0 -} - -.chat-content { - position: relative; - display: block; - float: right; - padding: 8px 15px; - margin: 0 20px 10px 0; - clear: both; - color: #2c2323; - background-color: #e7d9d9; - border-radius: 4px; - -webkit-box-shadow: 0 1px 4px 0 rgba(0, 0, 0, 0.37); - box-shadow: 0 1px 4px 0 rgba(0, 0, 0, 0.37); -} - -.chat-content:before { - position: absolute; - top: 10px; - right: -10px; - width: 0; - height: 0; - content: ''; - border: 5px solid transparent; - border-left-color: #dfe9ef -} - -.chat-content>p:last-child { - margin-bottom: 0 -} - -.chat-content+.chat-content:before { - border-color: transparent -} - -.chat-time { - display: block; - margin-top: 8px; - color: rgba(255, 255, 255, .6) -} - -.chat-left .chat-avatar { - float: left -} - -.chat-left .chat-body { - margin-right: 0; - margin-left: 30px -} - -.chat-left .chat-content { - float: left; - margin: 0 0 10px 20px; - color: #fff; - background-color: #7a3016 -} - -.chat-left .chat-content:before { - right: auto; - left: -10px; - border-right-color: #7a3016; - border-left-color: transparent -} - -.chat-left .chat-content+.chat-content:before { - border-color: transparent -} - -.chat-left .chat-time { - color: #a3afb7 -} - -.panel-footer { - padding: 0 30px 15px; - background-color: transparent; - border-top: 1px solid transparent; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} - -.avatar img { - width: 100%; - max-width: 100%; - height: auto; - border: 0 none; - border-radius: 1000px; -} - -.chat-avatar .avatar { - width: 30px; -} - -.avatar { - position: relative; - display: inline-block; - width: 40px; - white-space: nowrap; - border-radius: 1000px; - vertical-align: bottom; -} - - - - - -.chats { - height: calc(70vh - 3em); - overflow-y: auto; -} - -.error { - margin: 0 0; - color: #a40d0d; -} - -.typing-dots { - margin: 0 0 0 50px; - opacity: 0.5; -} - -.guide { - padding-top: 15vh; - text-align: center; - font-size: 1.2em; -} diff --git "a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/pages/2_Earnings_Summarization_\360\237\223\226_.py" "b/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/pages/2_Earnings_Summarization_\360\237\223\226_.py" deleted file mode 100644 index 6dfda5fb87fc5a834a71d3865099bbe4efd6568c..0000000000000000000000000000000000000000 --- "a/spaces/nickmuchi/Earnings-Call-Analysis-Whisperer/pages/2_Earnings_Summarization_\360\237\223\226_.py" +++ /dev/null @@ -1,51 +0,0 @@ -import streamlit as st -from functions import * - -# st.set_page_config(page_title="Earnings Summarization", page_icon="📖") -st.sidebar.header("Earnings Summarization") 
-st.markdown("## Earnings Summarization with Flan-T5-Base-SamSun") - -max_len= st.slider("Maximum length of the summarized text",min_value=70,max_value=200,step=10,value=100) -min_len= st.slider("Minimum length of the summarized text",min_value=20,max_value=200,step=10) - -st.markdown("####") - -st.subheader("Summarized Earnings Call with matched Entities") - -if "earnings_passages" not in st.session_state: - st.session_state["earnings_passages"] = '' - -if st.session_state['earnings_passages']: - - with st.spinner("Summarizing and matching entities, this takes a few seconds..."): - - try: - text_to_summarize = chunk_and_preprocess_text(st.session_state['earnings_passages']) - print(text_to_summarize) - summarized_text = summarize_text(text_to_summarize,max_len=max_len,min_len=min_len) - - - except IndexError: - try: - - text_to_summarize = chunk_and_preprocess_text(st.session_state['earnings_passages']) - summarized_text = summarize_text(text_to_summarize,max_len=max_len,min_len=min_len) - - - except IndexError: - - text_to_summarize = chunk_and_preprocess_text(st.session_state['earnings_passages']) - summarized_text = summarize_text(text_to_summarize,max_len=max_len,min_len=min_len) - - entity_match_html = highlight_entities(text_to_summarize,summarized_text) - st.markdown("####") - - with st.expander(label='Summarized Earnings Call',expanded=True): - st.write(entity_match_html, unsafe_allow_html=True) - - st.markdown("####") - - summary_downloader(summarized_text) - -else: - st.write("No text to summarize detected, please ensure you have entered the YouTube URL on the Sentiment Analysis page") \ No newline at end of file diff --git a/spaces/nightfury/Stable_Diffusion_2/style.css b/spaces/nightfury/Stable_Diffusion_2/style.css deleted file mode 100644 index 57ac874613ad432d3129fa1757249a319a601f3e..0000000000000000000000000000000000000000 --- a/spaces/nightfury/Stable_Diffusion_2/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/data/constants.py b/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/data/constants.py deleted file mode 100644 index be11cb5ac7c32a260af96ed27c32ed767b2f2bcd..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/configs/common/data/constants.py +++ /dev/null @@ -1,9 +0,0 @@ -constants = dict( - imagenet_rgb256_mean=[123.675, 116.28, 103.53], - imagenet_rgb256_std=[58.395, 57.12, 57.375], - imagenet_bgr256_mean=[103.530, 116.280, 123.675], - # When using pre-trained models in Detectron1 or any MSRA models, - # std has been absorbed into its conv1 weights, so the std needs to be set 1. 
- # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) - imagenet_bgr256_std=[1.0, 1.0, 1.0], -) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_nms_rotated.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_nms_rotated.py deleted file mode 100644 index 4b45384892ab2a7cb20871cf19374f1bd08907ce..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_nms_rotated.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from __future__ import absolute_import, division, print_function, unicode_literals -import numpy as np -import unittest -from copy import deepcopy -import torch -from torchvision import ops - -from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated -from detectron2.utils.testing import random_boxes - - -def nms_edit_distance(keep1, keep2): - """ - Compare the "keep" result of two nms call. - They are allowed to be different in terms of edit distance - due to floating point precision issues, e.g., - if a box happen to have an IoU of 0.5 with another box, - one implentation may choose to keep it while another may discard it. - """ - keep1, keep2 = keep1.cpu(), keep2.cpu() - if torch.equal(keep1, keep2): - # they should be equal most of the time - return 0 - keep1, keep2 = tuple(keep1), tuple(keep2) - m, n = len(keep1), len(keep2) - - # edit distance with DP - f = [np.arange(n + 1), np.arange(n + 1)] - for i in range(m): - cur_row = i % 2 - other_row = (i + 1) % 2 - f[other_row][0] = i + 1 - for j in range(n): - f[other_row][j + 1] = ( - f[cur_row][j] - if keep1[i] == keep2[j] - else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1 - ) - return f[m % 2][n] - - -class TestNMSRotated(unittest.TestCase): - def reference_horizontal_nms(self, boxes, scores, iou_threshold): - """ - Args: - box_scores (N, 5): boxes in corner-form and probabilities. - (Note here 5 == 4 + 1, i.e., 4-dim horizontal box + 1-dim prob) - iou_threshold: intersection over union threshold. 
- Returns: - picked: a list of indexes of the kept boxes - """ - picked = [] - _, indexes = scores.sort(descending=True) - while len(indexes) > 0: - current = indexes[0] - picked.append(current.item()) - if len(indexes) == 1: - break - current_box = boxes[current, :] - indexes = indexes[1:] - rest_boxes = boxes[indexes, :] - iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) - indexes = indexes[iou <= iou_threshold] - - return torch.as_tensor(picked) - - def _create_tensors(self, N, device="cpu"): - boxes = random_boxes(N, 200, device=device) - scores = torch.rand(N, device=device) - return boxes, scores - - def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"): - N = 2000 - num_classes = 50 - boxes, scores = self._create_tensors(N, device=device) - idxs = torch.randint(0, num_classes, (N,)) - rotated_boxes = torch.zeros(N, 5, device=device) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" - for iou in [0.2, 0.5, 0.8]: - backup = boxes.clone() - keep_ref = batched_nms(boxes, scores, idxs, iou) - assert torch.allclose(boxes, backup), "boxes modified by batched_nms" - backup = rotated_boxes.clone() - keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou) - assert torch.allclose( - rotated_boxes, backup - ), "rotated_boxes modified by batched_nms_rotated" - # Occasionally the gap can be large if there are many IOU on the threshold boundary - self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou)) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_batched_nms_rotated_0_degree_cuda(self): - self.test_batched_nms_rotated_0_degree_cpu(device="cuda") - - def test_nms_rotated_0_degree_cpu(self, device="cpu"): - N = 1000 - boxes, scores = self._create_tensors(N, device=device) - rotated_boxes = torch.zeros(N, 5, device=device) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.2, 0.5, 0.8]: - keep_ref = self.reference_horizontal_nms(boxes, scores, iou) - keep = nms_rotated(rotated_boxes, scores, iou) - self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_nms_rotated_0_degree_cuda(self): - self.test_nms_rotated_0_degree_cpu(device="cuda") - - def test_nms_rotated_90_degrees_cpu(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]: - # widths and heights are intentionally swapped here for 90 degrees case - # so that the reference horizontal nms could be used - rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1] - rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0] - - rotated_boxes[:, 4] = torch.ones(N) * 90 - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.2, 0.5, 0.8]: - keep_ref = self.reference_horizontal_nms(boxes, scores, 
iou) - keep = nms_rotated(rotated_boxes, scores, iou) - self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) - - def test_nms_rotated_180_degrees_cpu(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - rotated_boxes[:, 4] = torch.ones(N) * 180 - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.2, 0.5, 0.8]: - keep_ref = self.reference_horizontal_nms(boxes, scores, iou) - keep = nms_rotated(rotated_boxes, scores, iou) - self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) - - -class TestScriptable(unittest.TestCase): - def setUp(self): - class TestingModule(torch.nn.Module): - def forward(self, boxes, scores, threshold): - return nms_rotated(boxes, scores, threshold) - - self.module = TestingModule() - - def test_scriptable_cpu(self): - m = deepcopy(self.module).cpu() - _ = torch.jit.script(m) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_scriptable_cuda(self): - m = deepcopy(self.module).cuda() - _ = torch.jit.script(m) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/nmenezes0/fast-ai-example/app.py b/spaces/nmenezes0/fast-ai-example/app.py deleted file mode 100644 index 0f218989901e66451b94538ce9691560c6cd62ad..0000000000000000000000000000000000000000 --- a/spaces/nmenezes0/fast-ai-example/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import gradio as gr -from fastai.vision.all import PILImage, load_learner - - -def predict(img): - learn = load_learner("export.pkl") - labels = learn.dls.vocab - img = PILImage.create(img) - _, _, probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - - -description = "An example model following the Fast AI course - bactrian or dromedary" -article = "
        Fast AI course
        " - - -iface = gr.Interface( - fn=predict, - inputs=gr.inputs.Image(shape=(512, 512)), - outputs=gr.outputs.Label(num_top_classes=2), - title="Camels classifier", - description=description, - article=article, - examples=["bactrian.jpg", "dromedary.jpg"], - # interpretation="default", - allow_flagging="never", -).launch(enable_queue=True) -iface.launch() diff --git a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/en_005.js b/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/en_005.js deleted file mode 100644 index 4b682363530f4d44ce1401e69dbb557b8d8282e6..0000000000000000000000000000000000000000 --- a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/en_005.js +++ /dev/null @@ -1,25 +0,0 @@ -CKEDITOR.plugins.setLang('youtube', 'en', { - button : 'Embed YouTube Video', - title : 'Embed YouTube Video', - txtEmbed : 'Paste Embed Code Here', - txtUrl : 'Paste YouTube Video URL', - txtWidth : 'Width', - txtHeight : 'Height', - chkRelated : 'Show suggested videos at the video\'s end', - txtStartAt : 'Start at (ss or mm:ss or hh:mm:ss)', - chkPrivacy : 'Enable privacy-enhanced mode', - chkOlderCode : 'Use old embed code', - chkAutoplay: 'Autoplay', - chkControls: 'Show player controls', - noCode : 'You must input an embed code or URL', - invalidEmbed : 'The embed code you\'ve entered doesn\'t appear to be valid', - invalidUrl : 'The URL you\'ve entered doesn\'t appear to be valid', - or : 'or', - noWidth : 'You must inform the width', - invalidWidth : 'Inform a valid width', - noHeight : 'You must inform the height', - invalidHeight : 'Inform a valid height', - invalidTime : 'Inform a valid start time', - txtResponsive : 'Make Responsive (ignore width and height, fit to width)', - txtNoEmbed : 'Video image and link only' -}); diff --git a/spaces/odettecantswim/rvc-mlbb/infer_pack/models_onnx.py b/spaces/odettecantswim/rvc-mlbb/infer_pack/models_onnx.py deleted file mode 100644 index 3cdae2f7f8591a1e43b1d8520baa37b7e9744d72..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/rvc-mlbb/infer_pack/models_onnx.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = 
nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) 
- - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: 
tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - 
super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels 
- # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, 
x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/ofikodar/chatgpt-resume-builder/src/exceptions.py b/spaces/ofikodar/chatgpt-resume-builder/src/exceptions.py deleted file mode 100644 index 59b97b4bee15c2bd38365d2af9018c1f344be1db..0000000000000000000000000000000000000000 --- 
a/spaces/ofikodar/chatgpt-resume-builder/src/exceptions.py +++ /dev/null @@ -1,13 +0,0 @@ -class PDFSizeException(Exception): - """Raised when the input value is less than 3""" - pass - - -class ChatbotInitException(Exception): - """Raised when there's a problem with chabot init""" - pass - - -class ChatbotAPIException(Exception): - """Raised when there's a problem with openai api""" - pass diff --git a/spaces/olanigan/YoutubeAssistant/README.md b/spaces/olanigan/YoutubeAssistant/README.md deleted file mode 100644 index ee69684cc5ca0c82f0b555100e621a47714214fa..0000000000000000000000000000000000000000 --- a/spaces/olanigan/YoutubeAssistant/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: YoutubeAssistant -emoji: 🐨 -colorFrom: blue -colorTo: indigo -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/p-baleine/metaanalyser/metaanalyser/__init__.py b/spaces/p-baleine/metaanalyser/metaanalyser/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/p-baleine/metaanalyser/metaanalyser/chains/section/prompt.py b/spaces/p-baleine/metaanalyser/metaanalyser/chains/section/prompt.py deleted file mode 100644 index 67cc4ff2bf9ef0744db80f69b6d9110ba44a3db4..0000000000000000000000000000000000000000 --- a/spaces/p-baleine/metaanalyser/metaanalyser/chains/section/prompt.py +++ /dev/null @@ -1,33 +0,0 @@ -from langchain.prompts import ( - ChatPromptTemplate, - SystemMessagePromptTemplate, - HumanMessagePromptTemplate, -) - - -system_template = """You are a research scientist and intereseted in {categories}. You are working on writing a systematic review regarding \"{query}\". - -The outline of the systematic review is as follows: - ------ -Title: {title} -{outline}""" -system_prompt = SystemMessagePromptTemplate.from_template(system_template) - -human_template = """Write the "{section_title}" section of this systematic review based on the following list of snippets or abstracts of relative papers. - ------ -{snippets} ------ - -This systematic review should adhere to the following overview: - -{overview} - -Write the "{section_title}: {section_description}" section with respect to this overview. Write the text in markdown format. The title of this section should bu suffixed with {section_level} level markdown title (`{md_title_suffix}`). The text of the section should be based on a snippet or abstact and should be clearly cited. The citation should be written at the end of the sentence in the form `[^]` where `ID` refers to the citation_id.""" -human_prompt = HumanMessagePromptTemplate.from_template(human_template) - -SECTION_PROMPT = ChatPromptTemplate.from_messages([ - system_prompt, - human_prompt, -]) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/lora/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/lora/README.md deleted file mode 100644 index b5d72403166f9b4017751c3d47f79a9eb3f535d8..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/research_projects/lora/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# Stable Diffusion text-to-image fine-tuning -This extended LoRA training script was authored by [haofanwang](https://github.com/haofanwang). 
-This is an experimental LoRA extension of [this example](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py). We further support add LoRA layers for text encoder. - -## Training with LoRA - -Low-Rank Adaption of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. - -In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages: - -- Previous pretrained weights are kept frozen so that model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114). -- Rank-decomposition matrices have significantly fewer parameters than original model, which means that trained LoRA weights are easily portable. -- LoRA attention layers allow to control to which extent the model is adapted toward new training images via a `scale` parameter. - -[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository. - -With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset -on consumer GPUs like Tesla T4, Tesla V100. - -### Training - -First, you need to set up your development environment as is explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions). - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see generating images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___** - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export DATASET_NAME="lambdalabs/pokemon-blip-captions" -``` - -For this example we want to directly store the trained LoRA embeddings on the Hub, so -we need to be logged in and add the `--push_to_hub` flag. - -```bash -huggingface-cli login -``` - -Now we can start training! - -```bash -accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --dataset_name=$DATASET_NAME --caption_column="text" \ - --resolution=512 --random_flip \ - --train_batch_size=1 \ - --num_train_epochs=100 --checkpointing_steps=5000 \ - --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \ - --seed=42 \ - --output_dir="sd-pokemon-model-lora" \ - --validation_prompt="cute dragon creature" --report_to="wandb" - --use_peft \ - --lora_r=4 --lora_alpha=32 \ - --lora_text_encoder_r=4 --lora_text_encoder_alpha=32 -``` - -The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases. 
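To make the rank-decomposition idea above concrete, a minimal LoRA-wrapped linear layer might look like the sketch below. This is illustrative only, not the attention-processor path the training script takes through diffusers/PEFT: the `LoRALinear` name, the 768 width, and the `r=4` / `alpha=32` values are assumptions chosen for the example.

```python
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    """Illustrative LoRA layer: y = base(x) + (alpha / r) * B(A(x))."""

    def __init__(self, base: nn.Linear, r: int = 4, alpha: int = 32):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False  # pretrained weights stay frozen
        self.lora_a = nn.Linear(base.in_features, r, bias=False)
        self.lora_b = nn.Linear(r, base.out_features, bias=False)
        nn.init.normal_(self.lora_a.weight, std=0.02)
        nn.init.zeros_(self.lora_b.weight)  # low-rank update starts as a no-op
        self.scale = alpha / r

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.lora_b(self.lora_a(x))


layer = LoRALinear(nn.Linear(768, 768), r=4, alpha=32)
trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
print(trainable)  # 6144 trainable parameters vs. 590,592 in the frozen base layer
```

Because `lora_b` is initialised to zero, the wrapped layer is numerically identical to the pretrained one when training starts, and only the small `A`/`B` pair receives gradients — which is why the exported LoRA weights are only a few megabytes.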
- -**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` in consumer GPUs like T4 or V100.___** - -The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitudes smaller than the original model.___** - -You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw). - -### Inference - -Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline` after loading the trained LoRA weights. You -need to pass the `output_dir` for loading the LoRA weights which, in this case, is `sd-pokemon-model-lora`. - -```python -from diffusers import StableDiffusionPipeline -import torch - -model_path = "sayakpaul/sd-model-finetuned-lora-t4" -pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) -pipe.unet.load_attn_procs(model_path) -pipe.to("cuda") - -prompt = "A pokemon with green eyes and red legs." -image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0] -image.save("pokemon.png") -``` \ No newline at end of file diff --git a/spaces/patrawtf/shopify_csv_qa/app/__init__.py b/spaces/patrawtf/shopify_csv_qa/app/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/__main__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/__main__.py deleted file mode 100644 index 0be74537494dc2cf18c2e3b318ffd22b886aef6b..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/__main__.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Wheel command line tool (enable python -m wheel syntax) -""" - -from __future__ import annotations - -import sys - - -def main(): # needed for console script - if __package__ == "": - # To be able to run 'python wheel-0.9.whl/wheel': - import os.path - - path = os.path.dirname(os.path.dirname(__file__)) - sys.path[0:0] = [path] - import wheel.cli - - sys.exit(wheel.cli.main()) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/pmgautam/english-to-nepali-translation/app.py b/spaces/pmgautam/english-to-nepali-translation/app.py deleted file mode 100644 index af9fddd89a4477545fc38d8857a20af989eb227e..0000000000000000000000000000000000000000 --- a/spaces/pmgautam/english-to-nepali-translation/app.py +++ /dev/null @@ -1,38 +0,0 @@ -# imports -import gradio as gr -import pandas as pd -import torch -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer - -# select GPU if available -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -# setup model and tokenizer -model = AutoModelForSeq2SeqLM.from_pretrained( - "facebook/nllb-200-distilled-600M").to(device) -tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M") - - -def predict(text): - """_summary_ - predict function to do translation task - """ - text = [text] - inputs = tokenizer(text, 
return_tensors="pt", padding=True).to(device) - - translated_tokens = model.generate( - **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["npi_Deva"], max_length=30 - ) - return tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] - - -# call gradio interface -examples = ["use this example to see translation in nepali", - "this text is to test english to nepali translation"] -gr.Interface(fn=predict, - inputs=gr.Textbox(label="Input text"), - outputs=gr.Textbox(label="Output text"), - title="English-to-Nepali Translation", - article="Author **Pramesh Gautam**, Follow me on [Twitter](https://twitter.com/pmgautam_)", - css="footer {visibility: hidden}", - examples=examples).launch() diff --git a/spaces/priyankachinni/priyagenai/app.py b/spaces/priyankachinni/priyagenai/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/priyankachinni/priyagenai/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. -{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/abc.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/abc.py deleted file mode 100644 index 44a3bda34665a5e3b67fba9acc1e545a37b16617..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/abc.py +++ /dev/null @@ -1,207 +0,0 @@ -import asyncio -import logging -from abc import ABC, abstractmethod -from collections.abc import Sized -from http.cookies import BaseCookie, Morsel -from typing import ( - TYPE_CHECKING, - Any, - Awaitable, - Callable, - Dict, - Generator, - Iterable, - List, - Optional, - Tuple, -) - -from multidict import CIMultiDict -from yarl import URL - -from .helpers import get_running_loop -from .typedefs import LooseCookies - -if TYPE_CHECKING: # pragma: no cover - from .web_app import Application - from .web_exceptions import HTTPException - from .web_request import BaseRequest, Request - from .web_response import StreamResponse -else: - BaseRequest = Request = Application = StreamResponse = None - HTTPException = None - - -class AbstractRouter(ABC): - def __init__(self) -> None: - self._frozen = False - - def post_init(self, app: Application) -> None: - """Post init stage. - - Not an abstract method for sake of backward compatibility, - but if the router wants to be aware of the application - it can override this. 
- """ - - @property - def frozen(self) -> bool: - return self._frozen - - def freeze(self) -> None: - """Freeze router.""" - self._frozen = True - - @abstractmethod - async def resolve(self, request: Request) -> "AbstractMatchInfo": - """Return MATCH_INFO for given request""" - - -class AbstractMatchInfo(ABC): - @property # pragma: no branch - @abstractmethod - def handler(self) -> Callable[[Request], Awaitable[StreamResponse]]: - """Execute matched request handler""" - - @property - @abstractmethod - def expect_handler(self) -> Callable[[Request], Awaitable[None]]: - """Expect handler for 100-continue processing""" - - @property # pragma: no branch - @abstractmethod - def http_exception(self) -> Optional[HTTPException]: - """HTTPException instance raised on router's resolving, or None""" - - @abstractmethod # pragma: no branch - def get_info(self) -> Dict[str, Any]: - """Return a dict with additional info useful for introspection""" - - @property # pragma: no branch - @abstractmethod - def apps(self) -> Tuple[Application, ...]: - """Stack of nested applications. - - Top level application is left-most element. - - """ - - @abstractmethod - def add_app(self, app: Application) -> None: - """Add application to the nested apps stack.""" - - @abstractmethod - def freeze(self) -> None: - """Freeze the match info. - - The method is called after route resolution. - - After the call .add_app() is forbidden. - - """ - - -class AbstractView(ABC): - """Abstract class based view.""" - - def __init__(self, request: Request) -> None: - self._request = request - - @property - def request(self) -> Request: - """Request instance.""" - return self._request - - @abstractmethod - def __await__(self) -> Generator[Any, None, StreamResponse]: - """Execute the view handler.""" - - -class AbstractResolver(ABC): - """Abstract DNS resolver.""" - - @abstractmethod - async def resolve(self, host: str, port: int, family: int) -> List[Dict[str, Any]]: - """Return IP address for given hostname""" - - @abstractmethod - async def close(self) -> None: - """Release resolver""" - - -if TYPE_CHECKING: # pragma: no cover - IterableBase = Iterable[Morsel[str]] -else: - IterableBase = Iterable - - -ClearCookiePredicate = Callable[["Morsel[str]"], bool] - - -class AbstractCookieJar(Sized, IterableBase): - """Abstract Cookie Jar.""" - - def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: - self._loop = get_running_loop(loop) - - @abstractmethod - def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: - """Clear all cookies if no predicate is passed.""" - - @abstractmethod - def clear_domain(self, domain: str) -> None: - """Clear all cookies for domain and all subdomains.""" - - @abstractmethod - def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: - """Update cookies.""" - - @abstractmethod - def filter_cookies(self, request_url: URL) -> "BaseCookie[str]": - """Return the jar's cookies filtered by their attributes.""" - - -class AbstractStreamWriter(ABC): - """Abstract stream writer.""" - - buffer_size = 0 - output_size = 0 - length: Optional[int] = 0 - - @abstractmethod - async def write(self, chunk: bytes) -> None: - """Write chunk into stream.""" - - @abstractmethod - async def write_eof(self, chunk: bytes = b"") -> None: - """Write last chunk.""" - - @abstractmethod - async def drain(self) -> None: - """Flush the write buffer.""" - - @abstractmethod - def enable_compression(self, encoding: str = "deflate") -> None: - """Enable HTTP body 
compression""" - - @abstractmethod - def enable_chunking(self) -> None: - """Enable HTTP chunked mode""" - - @abstractmethod - async def write_headers( - self, status_line: str, headers: "CIMultiDict[str]" - ) -> None: - """Write HTTP headers""" - - -class AbstractAccessLogger(ABC): - """Abstract writer to access log.""" - - def __init__(self, logger: logging.Logger, log_format: str) -> None: - self.logger = logger - self.log_format = log_format - - @abstractmethod - def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: - """Emit log to logger.""" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_make.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_make.py deleted file mode 100644 index d72f738eeca66ea96ec836f57720a7f5d6ec5169..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_make.py +++ /dev/null @@ -1,2987 +0,0 @@ -# SPDX-License-Identifier: MIT - -import copy -import enum -import linecache -import sys -import types -import typing - -from operator import itemgetter - -# We need to import _compat itself in addition to the _compat members to avoid -# having the thread-local in the globals here. -from . import _compat, _config, setters -from ._compat import ( - PY310, - _AnnotationExtractor, - get_generic_base, - set_closure_cell, -) -from .exceptions import ( - DefaultAlreadySetError, - FrozenInstanceError, - NotAnAttrsClassError, - UnannotatedAttributeError, -) - - -# This is used at least twice, so cache it here. -_obj_setattr = object.__setattr__ -_init_converter_pat = "__attr_converter_%s" -_init_factory_pat = "__attr_factory_%s" -_classvar_prefixes = ( - "typing.ClassVar", - "t.ClassVar", - "ClassVar", - "typing_extensions.ClassVar", -) -# we don't use a double-underscore prefix because that triggers -# name mangling when trying to create a slot for the field -# (when slots=True) -_hash_cache_field = "_attrs_cached_hash" - -_empty_metadata_singleton = types.MappingProxyType({}) - -# Unique object for unequivocal getattr() defaults. -_sentinel = object() - -_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) - - -class _Nothing(enum.Enum): - """ - Sentinel to indicate the lack of a value when ``None`` is ambiguous. - - If extending attrs, you can use ``typing.Literal[NOTHING]`` to show - that a value may be ``NOTHING``. - - .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. - .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. - """ - - NOTHING = enum.auto() - - def __repr__(self): - return "NOTHING" - - def __bool__(self): - return False - - -NOTHING = _Nothing.NOTHING -""" -Sentinel to indicate the lack of a value when ``None`` is ambiguous. -""" - - -class _CacheHashWrapper(int): - """ - An integer subclass that pickles / copies as None - - This is used for non-slots classes with ``cache_hash=True``, to avoid - serializing a potentially (even likely) invalid hash value. Since ``None`` - is the default value for uncalculated hashes, whenever this is copied, - the copy's value for the hash should automatically reset. - - See GH #613 for more details. 
- """ - - def __reduce__(self, _none_constructor=type(None), _args=()): - return _none_constructor, _args - - -def attrib( - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=None, - init=True, - metadata=None, - type=None, - converter=None, - factory=None, - kw_only=False, - eq=None, - order=None, - on_setattr=None, - alias=None, -): - """ - Create a new attribute on a class. - - .. warning:: - - Does *not* do anything unless the class is also decorated with - `attr.s` / `attrs.define` / et cetera! - - Please consider using `attrs.field` in new code (``attr.ib`` will *never* - go away, though). - - :param default: A value that is used if an *attrs*-generated ``__init__`` - is used and no value is passed while instantiating or the attribute is - excluded using ``init=False``. - - If the value is an instance of `attrs.Factory`, its callable will be - used to construct a new value (useful for mutable data types like lists - or dicts). - - If a default is not set (or set manually to `attrs.NOTHING`), a value - *must* be supplied when instantiating; otherwise a `TypeError` - will be raised. - - The default can also be set using decorator notation as shown below. - - :type default: Any value - - :param callable factory: Syntactic sugar for - ``default=attr.Factory(factory)``. - - :param validator: `callable` that is called by *attrs*-generated - ``__init__`` methods after the instance has been initialized. They - receive the initialized instance, the :func:`~attrs.Attribute`, and the - passed value. - - The return value is *not* inspected so the validator has to throw an - exception itself. - - If a `list` is passed, its items are treated as validators and must - all pass. - - Validators can be globally disabled and re-enabled using - `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. - - The validator can also be set using decorator notation as shown below. - - :type validator: `callable` or a `list` of `callable`\\ s. - - :param repr: Include this attribute in the generated ``__repr__`` - method. If ``True``, include the attribute; if ``False``, omit it. By - default, the built-in ``repr()`` function is used. To override how the - attribute value is formatted, pass a ``callable`` that takes a single - value and returns a string. Note that the resulting string is used - as-is, i.e. it will be used directly *instead* of calling ``repr()`` - (the default). - :type repr: a `bool` or a `callable` to use a custom function. - - :param eq: If ``True`` (default), include this attribute in the - generated ``__eq__`` and ``__ne__`` methods that check two instances - for equality. To override how the attribute value is compared, - pass a ``callable`` that takes a single value and returns the value - to be compared. - :type eq: a `bool` or a `callable`. - - :param order: If ``True`` (default), include this attributes in the - generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. - To override how the attribute value is ordered, - pass a ``callable`` that takes a single value and returns the value - to be ordered. - :type order: a `bool` or a `callable`. - - :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the - same value. Must not be mixed with *eq* or *order*. - :type cmp: a `bool` or a `callable`. - - :param Optional[bool] hash: Include this attribute in the generated - ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This - is the correct behavior according the Python spec. 
Setting this value - to anything else than ``None`` is *discouraged*. - :param bool init: Include this attribute in the generated ``__init__`` - method. It is possible to set this to ``False`` and set a default - value. In that case this attributed is unconditionally initialized - with the specified default value or factory. - :param callable converter: `callable` that is called by - *attrs*-generated ``__init__`` methods to convert attribute's value - to the desired format. It is given the passed-in value, and the - returned value will be used as the new value of the attribute. The - value is converted before being passed to the validator, if any. - :param metadata: An arbitrary mapping, to be used by third-party - components. See `extending-metadata`. - - :param type: The type of the attribute. Nowadays, the preferred method to - specify the type is using a variable annotation (see :pep:`526`). - This argument is provided for backward compatibility. - Regardless of the approach used, the type will be stored on - ``Attribute.type``. - - Please note that *attrs* doesn't do anything with this metadata by - itself. You can use it as part of your own code or for - `static type checking `. - :param kw_only: Make this attribute keyword-only in the generated - ``__init__`` (if ``init`` is ``False``, this parameter is ignored). - :param on_setattr: Allows to overwrite the *on_setattr* setting from - `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. - Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this - attribute -- regardless of the setting in `attr.s`. - :type on_setattr: `callable`, or a list of callables, or `None`, or - `attrs.setters.NO_OP` - :param Optional[str] alias: Override this attribute's parameter name in the - generated ``__init__`` method. If left `None`, default to ``name`` - stripped of leading underscores. See `private-attributes`. - - .. versionadded:: 15.2.0 *convert* - .. versionadded:: 16.3.0 *metadata* - .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. - .. versionchanged:: 17.1.0 - *hash* is ``None`` and therefore mirrors *eq* by default. - .. versionadded:: 17.3.0 *type* - .. deprecated:: 17.4.0 *convert* - .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated - *convert* to achieve consistency with other noun-based arguments. - .. versionadded:: 18.1.0 - ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. - .. versionadded:: 18.2.0 *kw_only* - .. versionchanged:: 19.2.0 *convert* keyword argument removed. - .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. - .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. versionadded:: 19.2.0 *eq* and *order* - .. versionadded:: 20.1.0 *on_setattr* - .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 - .. versionchanged:: 21.1.0 - *eq*, *order*, and *cmp* also accept a custom callable - .. versionchanged:: 21.1.0 *cmp* undeprecated - .. versionadded:: 22.2.0 *alias* - """ - eq, eq_key, order, order_key = _determine_attrib_eq_order( - cmp, eq, order, True - ) - - if hash is not None and hash is not True and hash is not False: - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - - if factory is not None: - if default is not NOTHING: - raise ValueError( - "The `default` and `factory` arguments are mutually " - "exclusive." 
- ) - if not callable(factory): - raise ValueError("The `factory` argument must be a callable.") - default = Factory(factory) - - if metadata is None: - metadata = {} - - # Apply syntactic sugar by auto-wrapping. - if isinstance(on_setattr, (list, tuple)): - on_setattr = setters.pipe(*on_setattr) - - if validator and isinstance(validator, (list, tuple)): - validator = and_(*validator) - - if converter and isinstance(converter, (list, tuple)): - converter = pipe(*converter) - - return _CountingAttr( - default=default, - validator=validator, - repr=repr, - cmp=None, - hash=hash, - init=init, - converter=converter, - metadata=metadata, - type=type, - kw_only=kw_only, - eq=eq, - eq_key=eq_key, - order=order, - order_key=order_key, - on_setattr=on_setattr, - alias=alias, - ) - - -def _compile_and_eval(script, globs, locs=None, filename=""): - """ - "Exec" the script with the given global (globs) and local (locs) variables. - """ - bytecode = compile(script, filename, "exec") - eval(bytecode, globs, locs) - - -def _make_method(name, script, filename, globs): - """ - Create the method with the script given and return the method object. - """ - locs = {} - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - count = 1 - base_filename = filename - while True: - linecache_tuple = ( - len(script), - None, - script.splitlines(True), - filename, - ) - old_val = linecache.cache.setdefault(filename, linecache_tuple) - if old_val == linecache_tuple: - break - else: - filename = f"{base_filename[:-1]}-{count}>" - count += 1 - - _compile_and_eval(script, globs, locs, filename) - - return locs[name] - - -def _make_attr_tuple_class(cls_name, attr_names): - """ - Create a tuple subclass to hold `Attribute`s for an `attrs` class. - - The subclass is a bare tuple with properties for names. - - class MyClassAttributes(tuple): - __slots__ = () - x = property(itemgetter(0)) - """ - attr_class_name = f"{cls_name}Attributes" - attr_class_template = [ - f"class {attr_class_name}(tuple):", - " __slots__ = ()", - ] - if attr_names: - for i, attr_name in enumerate(attr_names): - attr_class_template.append( - f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))" - ) - else: - attr_class_template.append(" pass") - globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} - _compile_and_eval("\n".join(attr_class_template), globs) - return globs[attr_class_name] - - -# Tuple class for extracted attributes from a class definition. -# `base_attrs` is a subset of `attrs`. -_Attributes = _make_attr_tuple_class( - "_Attributes", - [ - # all attributes to build dunder methods for - "attrs", - # attributes that have been inherited - "base_attrs", - # map inherited attributes to their originating classes - "base_attrs_map", - ], -) - - -def _is_class_var(annot): - """ - Check whether *annot* is a typing.ClassVar. - - The string comparison hack is used to avoid evaluating all string - annotations which would put attrs-based classes at a performance - disadvantage compared to plain old classes. - """ - annot = str(annot) - - # Annotation can be quoted. - if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): - annot = annot[1:-1] - - return annot.startswith(_classvar_prefixes) - - -def _has_own_attribute(cls, attrib_name): - """ - Check whether *cls* defines *attrib_name* (and doesn't just inherit it). 
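As a brief usage sketch of the attr.ib API documented above (illustrative only; the class and field names are invented, and a reasonably recent attrs release is assumed):

import attr

@attr.s
class Connection:
    # Plain default value.
    host = attr.ib(default="localhost")
    # Mutable default via factory, so instances do not share one list.
    tags = attr.ib(factory=list)
    # The converter runs first; the validator then sees the converted value.
    port = attr.ib(default=8080, converter=int,
                   validator=attr.validators.instance_of(int))

c = Connection(port="9000")
assert c.port == 9000 and c.tags == []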
- """ - attr = getattr(cls, attrib_name, _sentinel) - if attr is _sentinel: - return False - - for base_cls in cls.__mro__[1:]: - a = getattr(base_cls, attrib_name, None) - if attr is a: - return False - - return True - - -def _get_annotations(cls): - """ - Get annotations for *cls*. - """ - if _has_own_attribute(cls, "__annotations__"): - return cls.__annotations__ - - return {} - - -def _collect_base_attrs(cls, taken_attr_names): - """ - Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. - """ - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - - # Traverse the MRO and collect attributes. - for base_cls in reversed(cls.__mro__[1:-1]): - for a in getattr(base_cls, "__attrs_attrs__", []): - if a.inherited or a.name in taken_attr_names: - continue - - a = a.evolve(inherited=True) - base_attrs.append(a) - base_attr_map[a.name] = base_cls - - # For each name, only keep the freshest definition i.e. the furthest at the - # back. base_attr_map is fine because it gets overwritten with every new - # instance. - filtered = [] - seen = set() - for a in reversed(base_attrs): - if a.name in seen: - continue - filtered.insert(0, a) - seen.add(a.name) - - return filtered, base_attr_map - - -def _collect_base_attrs_broken(cls, taken_attr_names): - """ - Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. - - N.B. *taken_attr_names* will be mutated. - - Adhere to the old incorrect behavior. - - Notably it collects from the front and considers inherited attributes which - leads to the buggy behavior reported in #428. - """ - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - - # Traverse the MRO and collect attributes. - for base_cls in cls.__mro__[1:-1]: - for a in getattr(base_cls, "__attrs_attrs__", []): - if a.name in taken_attr_names: - continue - - a = a.evolve(inherited=True) - taken_attr_names.add(a.name) - base_attrs.append(a) - base_attr_map[a.name] = base_cls - - return base_attrs, base_attr_map - - -def _transform_attrs( - cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer -): - """ - Transform all `_CountingAttr`s on a class into `Attribute`s. - - If *these* is passed, use that and don't look for them on the class. - - *collect_by_mro* is True, collect them in the correct MRO order, otherwise - use the old -- incorrect -- order. See #428. - - Return an `_Attributes`. - """ - cd = cls.__dict__ - anns = _get_annotations(cls) - - if these is not None: - ca_list = [(name, ca) for name, ca in these.items()] - elif auto_attribs is True: - ca_names = { - name - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - } - ca_list = [] - annot_names = set() - for attr_name, type in anns.items(): - if _is_class_var(type): - continue - annot_names.add(attr_name) - a = cd.get(attr_name, NOTHING) - - if not isinstance(a, _CountingAttr): - if a is NOTHING: - a = attrib() - else: - a = attrib(default=a) - ca_list.append((attr_name, a)) - - unannotated = ca_names - annot_names - if len(unannotated) > 0: - raise UnannotatedAttributeError( - "The following `attr.ib`s lack a type annotation: " - + ", ".join( - sorted(unannotated, key=lambda n: cd.get(n).counter) - ) - + "." 
- ) - else: - ca_list = sorted( - ( - (name, attr) - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - ), - key=lambda e: e[1].counter, - ) - - own_attrs = [ - Attribute.from_counting_attr( - name=attr_name, ca=ca, type=anns.get(attr_name) - ) - for attr_name, ca in ca_list - ] - - if collect_by_mro: - base_attrs, base_attr_map = _collect_base_attrs( - cls, {a.name for a in own_attrs} - ) - else: - base_attrs, base_attr_map = _collect_base_attrs_broken( - cls, {a.name for a in own_attrs} - ) - - if kw_only: - own_attrs = [a.evolve(kw_only=True) for a in own_attrs] - base_attrs = [a.evolve(kw_only=True) for a in base_attrs] - - attrs = base_attrs + own_attrs - - # Mandatory vs non-mandatory attr order only matters when they are part of - # the __init__ signature and when they aren't kw_only (which are moved to - # the end and can be mandatory or non-mandatory in any order, as they will - # be specified as keyword args anyway). Check the order of those attrs: - had_default = False - for a in (a for a in attrs if a.init is not False and a.kw_only is False): - if had_default is True and a.default is NOTHING: - raise ValueError( - "No mandatory attributes allowed after an attribute with a " - f"default value or factory. Attribute in question: {a!r}" - ) - - if had_default is False and a.default is not NOTHING: - had_default = True - - if field_transformer is not None: - attrs = field_transformer(cls, attrs) - - # Resolve default field alias after executing field_transformer. - # This allows field_transformer to differentiate between explicit vs - # default aliases and supply their own defaults. - attrs = [ - a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a - for a in attrs - ] - - # Create AttrsClass *after* applying the field_transformer since it may - # add or remove attributes! - attr_names = [a.name for a in attrs] - AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) - - return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) - - -def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - if isinstance(self, BaseException) and name in ( - "__cause__", - "__context__", - "__traceback__", - ): - BaseException.__setattr__(self, name, value) - return - - raise FrozenInstanceError() - - -def _frozen_delattrs(self, name): - """ - Attached to frozen classes as __delattr__. - """ - raise FrozenInstanceError() - - -class _ClassBuilder: - """ - Iteratively build *one* class. 
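The ordering rule enforced by _transform_attrs above (no mandatory attribute may follow one with a default, unless it is keyword-only) mirrors Python's own function-signature rules. A small illustrative sketch, with invented class names:

import attr

@attr.s
class Ok:
    a = attr.ib()           # mandatory attributes first
    b = attr.ib(default=2)  # attributes with defaults afterwards

try:
    @attr.s
    class Broken:
        a = attr.ib(default=1)
        b = attr.ib()       # mandatory after a default: rejected
except ValueError as exc:
    print(exc)  # "No mandatory attributes allowed after an attribute with a default ..."

@attr.s
class AlsoOk:
    a = attr.ib(default=1)
    b = attr.ib(kw_only=True)  # keyword-only attributes are exempt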
- """ - - __slots__ = ( - "_attr_names", - "_attrs", - "_base_attr_map", - "_base_names", - "_cache_hash", - "_cls", - "_cls_dict", - "_delete_attribs", - "_frozen", - "_has_pre_init", - "_has_post_init", - "_is_exc", - "_on_setattr", - "_slots", - "_weakref_slot", - "_wrote_own_setattr", - "_has_custom_setattr", - ) - - def __init__( - self, - cls, - these, - slots, - frozen, - weakref_slot, - getstate_setstate, - auto_attribs, - kw_only, - cache_hash, - is_exc, - collect_by_mro, - on_setattr, - has_custom_setattr, - field_transformer, - ): - attrs, base_attrs, base_map = _transform_attrs( - cls, - these, - auto_attribs, - kw_only, - collect_by_mro, - field_transformer, - ) - - self._cls = cls - self._cls_dict = dict(cls.__dict__) if slots else {} - self._attrs = attrs - self._base_names = {a.name for a in base_attrs} - self._base_attr_map = base_map - self._attr_names = tuple(a.name for a in attrs) - self._slots = slots - self._frozen = frozen - self._weakref_slot = weakref_slot - self._cache_hash = cache_hash - self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) - self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) - self._delete_attribs = not bool(these) - self._is_exc = is_exc - self._on_setattr = on_setattr - - self._has_custom_setattr = has_custom_setattr - self._wrote_own_setattr = False - - self._cls_dict["__attrs_attrs__"] = self._attrs - - if frozen: - self._cls_dict["__setattr__"] = _frozen_setattrs - self._cls_dict["__delattr__"] = _frozen_delattrs - - self._wrote_own_setattr = True - elif on_setattr in ( - _ng_default_on_setattr, - setters.validate, - setters.convert, - ): - has_validator = has_converter = False - for a in attrs: - if a.validator is not None: - has_validator = True - if a.converter is not None: - has_converter = True - - if has_validator and has_converter: - break - if ( - ( - on_setattr == _ng_default_on_setattr - and not (has_validator or has_converter) - ) - or (on_setattr == setters.validate and not has_validator) - or (on_setattr == setters.convert and not has_converter) - ): - # If class-level on_setattr is set to convert + validate, but - # there's no field to convert or validate, pretend like there's - # no on_setattr. - self._on_setattr = None - - if getstate_setstate: - ( - self._cls_dict["__getstate__"], - self._cls_dict["__setstate__"], - ) = self._make_getstate_setstate() - - def __repr__(self): - return f"<_ClassBuilder(cls={self._cls.__name__})>" - - if PY310: - import abc - - def build_class(self): - """ - Finalize class based on the accumulated configuration. - - Builder cannot be used after calling this method. - """ - if self._slots is True: - return self._create_slots_class() - - return self.abc.update_abstractmethods( - self._patch_original_class() - ) - - else: - - def build_class(self): - """ - Finalize class based on the accumulated configuration. - - Builder cannot be used after calling this method. - """ - if self._slots is True: - return self._create_slots_class() - - return self._patch_original_class() - - def _patch_original_class(self): - """ - Apply accumulated methods and return the class. - """ - cls = self._cls - base_names = self._base_names - - # Clean class of attribute definitions (`attr.ib()`s). 
- if self._delete_attribs: - for name in self._attr_names: - if ( - name not in base_names - and getattr(cls, name, _sentinel) is not _sentinel - ): - try: - delattr(cls, name) - except AttributeError: - # This can happen if a base class defines a class - # variable and we want to set an attribute with the - # same name by using only a type annotation. - pass - - # Attach our dunder methods. - for name, value in self._cls_dict.items(): - setattr(cls, name, value) - - # If we've inherited an attrs __setattr__ and don't write our own, - # reset it to object's. - if not self._wrote_own_setattr and getattr( - cls, "__attrs_own_setattr__", False - ): - cls.__attrs_own_setattr__ = False - - if not self._has_custom_setattr: - cls.__setattr__ = _obj_setattr - - return cls - - def _create_slots_class(self): - """ - Build and return a new class with a `__slots__` attribute. - """ - cd = { - k: v - for k, v in self._cls_dict.items() - if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") - } - - # If our class doesn't have its own implementation of __setattr__ - # (either from the user or by us), check the bases, if one of them has - # an attrs-made __setattr__, that needs to be reset. We don't walk the - # MRO because we only care about our immediate base classes. - # XXX: This can be confused by subclassing a slotted attrs class with - # XXX: a non-attrs class and subclass the resulting class with an attrs - # XXX: class. See `test_slotted_confused` for details. For now that's - # XXX: OK with us. - if not self._wrote_own_setattr: - cd["__attrs_own_setattr__"] = False - - if not self._has_custom_setattr: - for base_cls in self._cls.__bases__: - if base_cls.__dict__.get("__attrs_own_setattr__", False): - cd["__setattr__"] = _obj_setattr - break - - # Traverse the MRO to collect existing slots - # and check for an existing __weakref__. - existing_slots = dict() - weakref_inherited = False - for base_cls in self._cls.__mro__[1:-1]: - if base_cls.__dict__.get("__weakref__", None) is not None: - weakref_inherited = True - existing_slots.update( - { - name: getattr(base_cls, name) - for name in getattr(base_cls, "__slots__", []) - } - ) - - base_names = set(self._base_names) - - names = self._attr_names - if ( - self._weakref_slot - and "__weakref__" not in getattr(self._cls, "__slots__", ()) - and "__weakref__" not in names - and not weakref_inherited - ): - names += ("__weakref__",) - - # We only add the names of attributes that aren't inherited. - # Setting __slots__ to inherited attributes wastes memory. - slot_names = [name for name in names if name not in base_names] - # There are slots for attributes from current class - # that are defined in parent classes. - # As their descriptors may be overridden by a child class, - # we collect them here and update the class dict - reused_slots = { - slot: slot_descriptor - for slot, slot_descriptor in existing_slots.items() - if slot in slot_names - } - slot_names = [name for name in slot_names if name not in reused_slots] - cd.update(reused_slots) - if self._cache_hash: - slot_names.append(_hash_cache_field) - cd["__slots__"] = tuple(slot_names) - - cd["__qualname__"] = self._cls.__qualname__ - - # Create new class based on old class and our methods. - cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) - - # The following is a fix for - # . - # If a method mentions `__class__` or uses the no-arg super(), the - # compiler will bake a reference to the class in the method itself - # as `method.__closure__`. 
Since we replace the class with a - # clone, we rewrite these references so it keeps working. - for item in cls.__dict__.values(): - if isinstance(item, (classmethod, staticmethod)): - # Class- and staticmethods hide their functions inside. - # These might need to be rewritten as well. - closure_cells = getattr(item.__func__, "__closure__", None) - elif isinstance(item, property): - # Workaround for property `super()` shortcut (PY3-only). - # There is no universal way for other descriptors. - closure_cells = getattr(item.fget, "__closure__", None) - else: - closure_cells = getattr(item, "__closure__", None) - - if not closure_cells: # Catch None or the empty list. - continue - for cell in closure_cells: - try: - match = cell.cell_contents is self._cls - except ValueError: # ValueError: Cell is empty - pass - else: - if match: - set_closure_cell(cell, cls) - - return cls - - def add_repr(self, ns): - self._cls_dict["__repr__"] = self._add_method_dunders( - _make_repr(self._attrs, ns, self._cls) - ) - return self - - def add_str(self): - repr = self._cls_dict.get("__repr__") - if repr is None: - raise ValueError( - "__str__ can only be generated if a __repr__ exists." - ) - - def __str__(self): - return self.__repr__() - - self._cls_dict["__str__"] = self._add_method_dunders(__str__) - return self - - def _make_getstate_setstate(self): - """ - Create custom __setstate__ and __getstate__ methods. - """ - # __weakref__ is not writable. - state_attr_names = tuple( - an for an in self._attr_names if an != "__weakref__" - ) - - def slots_getstate(self): - """ - Automatically created by attrs. - """ - return {name: getattr(self, name) for name in state_attr_names} - - hash_caching_enabled = self._cache_hash - - def slots_setstate(self, state): - """ - Automatically created by attrs. - """ - __bound_setattr = _obj_setattr.__get__(self) - if isinstance(state, tuple): - # Backward compatibility with attrs instances pickled with - # attrs versions before v22.2.0 which stored tuples. - for name, value in zip(state_attr_names, state): - __bound_setattr(name, value) - else: - for name in state_attr_names: - if name in state: - __bound_setattr(name, state[name]) - - # The hash code cache is not included when the object is - # serialized, but it still needs to be initialized to None to - # indicate that the first call to __hash__ should be a cache - # miss. 
- if hash_caching_enabled: - __bound_setattr(_hash_cache_field, None) - - return slots_getstate, slots_setstate - - def make_unhashable(self): - self._cls_dict["__hash__"] = None - return self - - def add_hash(self): - self._cls_dict["__hash__"] = self._add_method_dunders( - _make_hash( - self._cls, - self._attrs, - frozen=self._frozen, - cache_hash=self._cache_hash, - ) - ) - - return self - - def add_init(self): - self._cls_dict["__init__"] = self._add_method_dunders( - _make_init( - self._cls, - self._attrs, - self._has_pre_init, - self._has_post_init, - self._frozen, - self._slots, - self._cache_hash, - self._base_attr_map, - self._is_exc, - self._on_setattr, - attrs_init=False, - ) - ) - - return self - - def add_match_args(self): - self._cls_dict["__match_args__"] = tuple( - field.name - for field in self._attrs - if field.init and not field.kw_only - ) - - def add_attrs_init(self): - self._cls_dict["__attrs_init__"] = self._add_method_dunders( - _make_init( - self._cls, - self._attrs, - self._has_pre_init, - self._has_post_init, - self._frozen, - self._slots, - self._cache_hash, - self._base_attr_map, - self._is_exc, - self._on_setattr, - attrs_init=True, - ) - ) - - return self - - def add_eq(self): - cd = self._cls_dict - - cd["__eq__"] = self._add_method_dunders( - _make_eq(self._cls, self._attrs) - ) - cd["__ne__"] = self._add_method_dunders(_make_ne()) - - return self - - def add_order(self): - cd = self._cls_dict - - cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( - self._add_method_dunders(meth) - for meth in _make_order(self._cls, self._attrs) - ) - - return self - - def add_setattr(self): - if self._frozen: - return self - - sa_attrs = {} - for a in self._attrs: - on_setattr = a.on_setattr or self._on_setattr - if on_setattr and on_setattr is not setters.NO_OP: - sa_attrs[a.name] = a, on_setattr - - if not sa_attrs: - return self - - if self._has_custom_setattr: - # We need to write a __setattr__ but there already is one! - raise ValueError( - "Can't combine custom __setattr__ with on_setattr hooks." - ) - - # docstring comes from _add_method_dunders - def __setattr__(self, name, val): - try: - a, hook = sa_attrs[name] - except KeyError: - nval = val - else: - nval = hook(self, a, val) - - _obj_setattr(self, name, nval) - - self._cls_dict["__attrs_own_setattr__"] = True - self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) - self._wrote_own_setattr = True - - return self - - def _add_method_dunders(self, method): - """ - Add __module__ and __qualname__ to a *method* if possible. - """ - try: - method.__module__ = self._cls.__module__ - except AttributeError: - pass - - try: - method.__qualname__ = ".".join( - (self._cls.__qualname__, method.__name__) - ) - except AttributeError: - pass - - try: - method.__doc__ = ( - "Method generated by attrs for class " - f"{self._cls.__qualname__}." - ) - except AttributeError: - pass - - return method - - -def _determine_attrs_eq_order(cmp, eq, order, default_eq): - """ - Validate the combination of *cmp*, *eq*, and *order*. Derive the effective - values of eq and order. If *eq* is None, set it to *default_eq*. - """ - if cmp is not None and any((eq is not None, order is not None)): - raise ValueError("Don't mix `cmp` with `eq' and `order`.") - - # cmp takes precedence due to bw-compatibility. - if cmp is not None: - return cmp, cmp - - # If left None, equality is set to the specified default and ordering - # mirrors equality. 
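The add_setattr machinery above wires per-attribute and class-level on_setattr hooks, and setters.pipe(setters.convert, setters.validate) is the pipeline used as the modern default. A hedged sketch of how those hooks are used through the public API (invented class name; assumes an attrs release with on_setattr support):

import attr
from attr import setters

@attr.s(on_setattr=setters.pipe(setters.convert, setters.validate))
class Server:
    port = attr.ib(converter=int,
                   validator=attr.validators.instance_of(int))

s = Server(port="8000")   # converter and validator always run in __init__
s.port = "8080"           # the on_setattr pipeline converts, then validates
assert s.port == 8080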
- if eq is None: - eq = default_eq - - if order is None: - order = eq - - if eq is False and order is True: - raise ValueError("`order` can only be True if `eq` is True too.") - - return eq, order - - -def _determine_attrib_eq_order(cmp, eq, order, default_eq): - """ - Validate the combination of *cmp*, *eq*, and *order*. Derive the effective - values of eq and order. If *eq* is None, set it to *default_eq*. - """ - if cmp is not None and any((eq is not None, order is not None)): - raise ValueError("Don't mix `cmp` with `eq' and `order`.") - - def decide_callable_or_boolean(value): - """ - Decide whether a key function is used. - """ - if callable(value): - value, key = True, value - else: - key = None - return value, key - - # cmp takes precedence due to bw-compatibility. - if cmp is not None: - cmp, cmp_key = decide_callable_or_boolean(cmp) - return cmp, cmp_key, cmp, cmp_key - - # If left None, equality is set to the specified default and ordering - # mirrors equality. - if eq is None: - eq, eq_key = default_eq, None - else: - eq, eq_key = decide_callable_or_boolean(eq) - - if order is None: - order, order_key = eq, eq_key - else: - order, order_key = decide_callable_or_boolean(order) - - if eq is False and order is True: - raise ValueError("`order` can only be True if `eq` is True too.") - - return eq, eq_key, order, order_key - - -def _determine_whether_to_implement( - cls, flag, auto_detect, dunders, default=True -): - """ - Check whether we should implement a set of methods for *cls*. - - *flag* is the argument passed into @attr.s like 'init', *auto_detect* the - same as passed into @attr.s and *dunders* is a tuple of attribute names - whose presence signal that the user has implemented it themselves. - - Return *default* if no reason for either for or against is found. - """ - if flag is True or flag is False: - return flag - - if flag is None and auto_detect is False: - return default - - # Logically, flag is None and auto_detect is True here. - for dunder in dunders: - if _has_own_attribute(cls, dunder): - return False - - return default - - -def attrs( - maybe_cls=None, - these=None, - repr_ns=None, - repr=None, - cmp=None, - hash=None, - init=None, - slots=False, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=False, - kw_only=False, - cache_hash=False, - auto_exc=False, - eq=None, - order=None, - auto_detect=False, - collect_by_mro=False, - getstate_setstate=None, - on_setattr=None, - field_transformer=None, - match_args=True, - unsafe_hash=None, -): - r""" - A class decorator that adds :term:`dunder methods` according to the - specified attributes using `attr.ib` or the *these* argument. - - Please consider using `attrs.define` / `attrs.frozen` in new code - (``attr.s`` will *never* go away, though). - - :param these: A dictionary of name to `attr.ib` mappings. This is - useful to avoid the definition of your attributes within the class body - because you can't (e.g. if you want to add ``__repr__`` methods to - Django models) or don't want to. - - If *these* is not ``None``, *attrs* will *not* search the class body - for attributes and will *not* remove any attributes from it. - - The order is deduced from the order of the attributes inside *these*. - - :type these: `dict` of `str` to `attr.ib` - - :param str repr_ns: When using nested classes, there's no way in Python 2 - to automatically detect that. Therefore it's possible to set the - namespace explicitly for a more meaningful ``repr`` output. 
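_determine_attrib_eq_order above accepts a callable in place of a boolean for eq and order; the callable acts as a key function applied to the attribute value before comparison. An illustrative sketch (invented class name; callable keys assume a release that supports them):

import attr

@attr.s(order=True)
class User:
    # Compare and order case-insensitively via a key callable.
    name = attr.ib(eq=str.lower, order=str.lower)

assert User("Alice") == User("ALICE")
assert User("bob") < User("Carol")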
- :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, - *order*, and *hash* arguments explicitly, assume they are set to - ``True`` **unless any** of the involved methods for one of the - arguments is implemented in the *current* class (i.e. it is *not* - inherited from some base class). - - So for example by implementing ``__eq__`` on a class yourself, - *attrs* will deduce ``eq=False`` and will create *neither* - ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible - ``__ne__`` by default, so it *should* be enough to only implement - ``__eq__`` in most cases). - - .. warning:: - - If you prevent *attrs* from creating the ordering methods for you - (``order=False``, e.g. by implementing ``__le__``), it becomes - *your* responsibility to make sure its ordering is sound. The best - way is to use the `functools.total_ordering` decorator. - - - Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, - *cmp*, or *hash* overrides whatever *auto_detect* would determine. - - :param bool repr: Create a ``__repr__`` method with a human readable - representation of *attrs* attributes.. - :param bool str: Create a ``__str__`` method that is identical to - ``__repr__``. This is usually not necessary except for - `Exception`\ s. - :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` - and ``__ne__`` methods that check two instances for equality. - - They compare the instances as if they were tuples of their *attrs* - attributes if and only if the types of both classes are *identical*! - :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, - ``__gt__``, and ``__ge__`` methods that behave like *eq* above and - allow instances to be ordered. If ``None`` (default) mirror value of - *eq*. - :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* - and *order* to the same value. Must not be mixed with *eq* or *order*. - :param Optional[bool] unsafe_hash: If ``None`` (default), the ``__hash__`` - method is generated according how *eq* and *frozen* are set. - - 1. If *both* are True, *attrs* will generate a ``__hash__`` for you. - 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to - None, marking it unhashable (which it is). - 3. If *eq* is False, ``__hash__`` will be left untouched meaning the - ``__hash__`` method of the base class will be used (if base class is - ``object``, this means it will fall back to id-based hashing.). - - Although not recommended, you can decide for yourself and force - *attrs* to create one (e.g. if the class is immutable even though you - didn't freeze it programmatically) by passing ``True`` or not. Both of - these cases are rather special and should be used carefully. - - See our documentation on `hashing`, Python's documentation on - `object.__hash__`, and the `GitHub issue that led to the default \ - behavior `_ for more - details. - :param Optional[bool] hash: Alias for *unsafe_hash*. *unsafe_hash* takes - precedence. - :param bool init: Create a ``__init__`` method that initializes the - *attrs* attributes. Leading underscores are stripped for the argument - name. If a ``__attrs_pre_init__`` method exists on the class, it will - be called before the class is initialized. If a ``__attrs_post_init__`` - method exists on the class, it will be called after the class is fully - initialized. - - If ``init`` is ``False``, an ``__attrs_init__`` method will be - injected instead. 
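The hash rules listed above can be seen directly on small classes; this is an illustrative sketch with invented names, not part of the original file:

import attr

@attr.s(frozen=True)   # eq=True and frozen=True: a __hash__ is generated
class FrozenPoint:
    x = attr.ib()

@attr.s                # eq=True, frozen=False: __hash__ is set to None
class MutablePoint:
    x = attr.ib()

assert hash(FrozenPoint(1)) == hash(FrozenPoint(1))
try:
    hash(MutablePoint(1))
except TypeError:
    print("unhashable, as rule 2 above describes")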
This allows you to define a custom ``__init__`` - method that can do pre-init work such as ``super().__init__()``, - and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. - :param bool slots: Create a :term:`slotted class ` that's - more memory-efficient. Slotted classes are generally superior to the - default dict classes, but have some gotchas you should know about, so - we encourage you to read the :term:`glossary entry `. - :param bool frozen: Make instances immutable after initialization. If - someone attempts to modify a frozen instance, - `attrs.exceptions.FrozenInstanceError` is raised. - - .. note:: - - 1. This is achieved by installing a custom ``__setattr__`` method - on your class, so you can't implement your own. - - 2. True immutability is impossible in Python. - - 3. This *does* have a minor a runtime performance `impact - ` when initializing new instances. In other words: - ``__init__`` is slightly slower with ``frozen=True``. - - 4. If a class is frozen, you cannot modify ``self`` in - ``__attrs_post_init__`` or a self-written ``__init__``. You can - circumvent that limitation by using - ``object.__setattr__(self, "attribute_name", value)``. - - 5. Subclasses of a frozen class are frozen too. - - :param bool weakref_slot: Make instances weak-referenceable. This has no - effect unless ``slots`` is also enabled. - :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated - attributes from the class body. - - In this case, you **must** annotate every field. If *attrs* - encounters a field that is set to an `attr.ib` but lacks a type - annotation, an `attr.exceptions.UnannotatedAttributeError` is - raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't - want to set a type. - - If you assign a value to those attributes (e.g. ``x: int = 42``), that - value becomes the default value like if it were passed using - ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also - works as expected in most cases (see warning below). - - Attributes annotated as `typing.ClassVar`, and attributes that are - neither annotated nor set to an `attr.ib` are **ignored**. - - .. warning:: - For features that use the attribute name to create decorators (e.g. - :ref:`validators `), you still *must* assign `attr.ib` - to them. Otherwise Python will either not find the name or try to - use the default value to call e.g. ``validator`` on it. - - These errors can be quite confusing and probably the most common bug - report on our bug tracker. - - :param bool kw_only: Make all attributes keyword-only - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - :param bool cache_hash: Ensure that the object's hash code is computed - only once and stored on the object. If this is set to ``True``, - hashing must be either explicitly or implicitly enabled for this - class. If the hash code is cached, avoid any reassignments of - fields involved in hash code computation or mutations of the objects - those fields point to after object creation. If such changes occur, - the behavior of the object's hash code is undefined. - :param bool auto_exc: If the class subclasses `BaseException` - (which implicitly includes any subclass of any exception), the - following happens to behave like a well-behaved Python exceptions - class: - - - the values for *eq*, *order*, and *hash* are ignored and the - instances compare and hash by the instance's ids (N.B. 
*attrs* will - *not* remove existing implementations of ``__hash__`` or the equality - methods. It just won't add own ones.), - - all attributes that are either passed into ``__init__`` or have a - default value are additionally available as a tuple in the ``args`` - attribute, - - the value of *str* is ignored leaving ``__str__`` to base classes. - :param bool collect_by_mro: Setting this to `True` fixes the way *attrs* - collects attributes from base classes. The default behavior is - incorrect in certain cases of multiple inheritance. It should be on by - default but is kept off for backward-compatibility. - - See issue `#428 `_ for - more details. - - :param Optional[bool] getstate_setstate: - .. note:: - This is usually only interesting for slotted classes and you should - probably just set *auto_detect* to `True`. - - If `True`, ``__getstate__`` and - ``__setstate__`` are generated and attached to the class. This is - necessary for slotted classes to be pickleable. If left `None`, it's - `True` by default for slotted classes and ``False`` for dict classes. - - If *auto_detect* is `True`, and *getstate_setstate* is left `None`, - and **either** ``__getstate__`` or ``__setstate__`` is detected directly - on the class (i.e. not inherited), it is set to `False` (this is usually - what you want). - - :param on_setattr: A callable that is run whenever the user attempts to set - an attribute (either by assignment like ``i.x = 42`` or by using - `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments - as validators: the instance, the attribute that is being modified, and - the new value. - - If no exception is raised, the attribute is set to the return value of - the callable. - - If a list of callables is passed, they're automatically wrapped in an - `attrs.setters.pipe`. - :type on_setattr: `callable`, or a list of callables, or `None`, or - `attrs.setters.NO_OP` - - :param Optional[callable] field_transformer: - A function that is called with the original class object and all - fields right before *attrs* finalizes the class. You can use - this, e.g., to automatically add converters or validators to - fields based on their types. See `transform-fields` for more details. - - :param bool match_args: - If `True` (default), set ``__match_args__`` on the class to support - :pep:`634` (Structural Pattern Matching). It is a tuple of all - non-keyword-only ``__init__`` parameter names on Python 3.10 and later. - Ignored on older Python versions. - - .. versionadded:: 16.0.0 *slots* - .. versionadded:: 16.1.0 *frozen* - .. versionadded:: 16.3.0 *str* - .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. - .. versionchanged:: 17.1.0 - *hash* supports ``None`` as value which is also the default now. - .. versionadded:: 17.3.0 *auto_attribs* - .. versionchanged:: 18.1.0 - If *these* is passed, no attributes are deleted from the class body. - .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. - .. versionadded:: 18.2.0 *weakref_slot* - .. deprecated:: 18.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a - `DeprecationWarning` if the classes compared are subclasses of - each other. ``__eq`` and ``__ne__`` never tried to compared subclasses - to each other. - .. versionchanged:: 19.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider - subclasses comparable anymore. - .. versionadded:: 18.2.0 *kw_only* - .. versionadded:: 18.2.0 *cache_hash* - .. versionadded:: 19.1.0 *auto_exc* - .. 
deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. versionadded:: 19.2.0 *eq* and *order* - .. versionadded:: 20.1.0 *auto_detect* - .. versionadded:: 20.1.0 *collect_by_mro* - .. versionadded:: 20.1.0 *getstate_setstate* - .. versionadded:: 20.1.0 *on_setattr* - .. versionadded:: 20.3.0 *field_transformer* - .. versionchanged:: 21.1.0 - ``init=False`` injects ``__attrs_init__`` - .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` - .. versionchanged:: 21.1.0 *cmp* undeprecated - .. versionadded:: 21.3.0 *match_args* - .. versionadded:: 22.2.0 - *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). - """ - eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) - - # unsafe_hash takes precedence due to PEP 681. - if unsafe_hash is not None: - hash = unsafe_hash - - if isinstance(on_setattr, (list, tuple)): - on_setattr = setters.pipe(*on_setattr) - - def wrap(cls): - is_frozen = frozen or _has_frozen_base_class(cls) - is_exc = auto_exc is True and issubclass(cls, BaseException) - has_own_setattr = auto_detect and _has_own_attribute( - cls, "__setattr__" - ) - - if has_own_setattr and is_frozen: - raise ValueError("Can't freeze a class with a custom __setattr__.") - - builder = _ClassBuilder( - cls, - these, - slots, - is_frozen, - weakref_slot, - _determine_whether_to_implement( - cls, - getstate_setstate, - auto_detect, - ("__getstate__", "__setstate__"), - default=slots, - ), - auto_attribs, - kw_only, - cache_hash, - is_exc, - collect_by_mro, - on_setattr, - has_own_setattr, - field_transformer, - ) - if _determine_whether_to_implement( - cls, repr, auto_detect, ("__repr__",) - ): - builder.add_repr(repr_ns) - if str is True: - builder.add_str() - - eq = _determine_whether_to_implement( - cls, eq_, auto_detect, ("__eq__", "__ne__") - ) - if not is_exc and eq is True: - builder.add_eq() - if not is_exc and _determine_whether_to_implement( - cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") - ): - builder.add_order() - - builder.add_setattr() - - nonlocal hash - if ( - hash is None - and auto_detect is True - and _has_own_attribute(cls, "__hash__") - ): - hash = False - - if hash is not True and hash is not False and hash is not None: - # Can't use `hash in` because 1 == True for example. - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - elif hash is False or (hash is None and eq is False) or is_exc: - # Don't do anything. Should fall back to __object__'s __hash__ - # which is by id. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - elif hash is True or ( - hash is None and eq is True and is_frozen is True - ): - # Build a __hash__ if told so, or if it's safe. - builder.add_hash() - else: - # Raise TypeError on attempts to hash. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - builder.make_unhashable() - - if _determine_whether_to_implement( - cls, init, auto_detect, ("__init__",) - ): - builder.add_init() - else: - builder.add_attrs_init() - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " init must be True." - ) - - if ( - PY310 - and match_args - and not _has_own_attribute(cls, "__match_args__") - ): - builder.add_match_args() - - return builder.build_class() - - # maybe_cls's type depends on the usage of the decorator. 
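When init=False is requested, the builder above calls add_attrs_init() instead of add_init(), so a hand-written __init__ can still delegate to the generated initializer. A short sketch with invented names:

import attr

@attr.s(init=False)
class Point:
    x = attr.ib()
    y = attr.ib()

    def __init__(self, x, y):
        # Do any pre-init work here, then hand off to the generated
        # initializer, which runs converters, validators and
        # __attrs_post_init__ as usual.
        self.__attrs_init__(x=x, y=y)

p = Point(1, 2)
assert (p.x, p.y) == (1, 2)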
It's a class - # if it's used as `@attrs` but ``None`` if used as `@attrs()`. - if maybe_cls is None: - return wrap - else: - return wrap(maybe_cls) - - -_attrs = attrs -""" -Internal alias so we can use it in functions that take an argument called -*attrs*. -""" - - -def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return cls.__setattr__ is _frozen_setattrs - - -def _generate_unique_filename(cls, func_name): - """ - Create a "filename" suitable for a function being generated. - """ - return ( - f"" - ) - - -def _make_hash(cls, attrs, frozen, cache_hash): - attrs = tuple( - a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) - ) - - tab = " " - - unique_filename = _generate_unique_filename(cls, "hash") - type_hash = hash(unique_filename) - # If eq is custom generated, we need to include the functions in globs - globs = {} - - hash_def = "def __hash__(self" - hash_func = "hash((" - closing_braces = "))" - if not cache_hash: - hash_def += "):" - else: - hash_def += ", *" - - hash_def += ( - ", _cache_wrapper=" - + "__import__('attr._make')._make._CacheHashWrapper):" - ) - hash_func = "_cache_wrapper(" + hash_func - closing_braces += ")" - - method_lines = [hash_def] - - def append_hash_computation_lines(prefix, indent): - """ - Generate the code for actually computing the hash code. - Below this will either be returned directly or used to compute - a value which is then cached, depending on the value of cache_hash - """ - - method_lines.extend( - [ - indent + prefix + hash_func, - indent + f" {type_hash},", - ] - ) - - for a in attrs: - if a.eq_key: - cmp_name = f"_{a.name}_key" - globs[cmp_name] = a.eq_key - method_lines.append( - indent + f" {cmp_name}(self.{a.name})," - ) - else: - method_lines.append(indent + f" self.{a.name},") - - method_lines.append(indent + " " + closing_braces) - - if cache_hash: - method_lines.append(tab + f"if self.{_hash_cache_field} is None:") - if frozen: - append_hash_computation_lines( - f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2 - ) - method_lines.append(tab * 2 + ")") # close __setattr__ - else: - append_hash_computation_lines( - f"self.{_hash_cache_field} = ", tab * 2 - ) - method_lines.append(tab + f"return self.{_hash_cache_field}") - else: - append_hash_computation_lines("return ", tab) - - script = "\n".join(method_lines) - return _make_method("__hash__", script, unique_filename, globs) - - -def _add_hash(cls, attrs): - """ - Add a hash method to *cls*. - """ - cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) - return cls - - -def _make_ne(): - """ - Create __ne__ method. - """ - - def __ne__(self, other): - """ - Check equality and either forward a NotImplemented or - return the result negated. - """ - result = self.__eq__(other) - if result is NotImplemented: - return NotImplemented - - return not result - - return __ne__ - - -def _make_eq(cls, attrs): - """ - Create __eq__ method for *cls* with *attrs*. - """ - attrs = [a for a in attrs if a.eq] - - unique_filename = _generate_unique_filename(cls, "eq") - lines = [ - "def __eq__(self, other):", - " if other.__class__ is not self.__class__:", - " return NotImplemented", - ] - - # We can't just do a big self.x = other.x and... clause due to - # irregularities like nan == nan is false but (nan,) == (nan,) is true. 
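The NaN irregularity mentioned in the comment above is easy to demonstrate, and it is why the generated __eq__ compares tuples of attribute values instead of chaining == per attribute (illustrative sketch, invented class name):

import attr

nan = float("nan")
assert (nan == nan) is False        # plain comparison: NaN != NaN
assert ((nan,) == (nan,)) is True   # tuple comparison short-circuits on identity

@attr.s
class Box:
    value = attr.ib()

# Because the generated __eq__ compares attribute tuples, two instances
# holding the *same* NaN object still compare equal.
assert Box(nan) == Box(nan)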
- globs = {} - if attrs: - lines.append(" return (") - others = [" ) == ("] - for a in attrs: - if a.eq_key: - cmp_name = f"_{a.name}_key" - # Add the key function to the global namespace - # of the evaluated function. - globs[cmp_name] = a.eq_key - lines.append(f" {cmp_name}(self.{a.name}),") - others.append(f" {cmp_name}(other.{a.name}),") - else: - lines.append(f" self.{a.name},") - others.append(f" other.{a.name},") - - lines += others + [" )"] - else: - lines.append(" return True") - - script = "\n".join(lines) - - return _make_method("__eq__", script, unique_filename, globs) - - -def _make_order(cls, attrs): - """ - Create ordering methods for *cls* with *attrs*. - """ - attrs = [a for a in attrs if a.order] - - def attrs_to_tuple(obj): - """ - Save us some typing. - """ - return tuple( - key(value) if key else value - for value, key in ( - (getattr(obj, a.name), a.order_key) for a in attrs - ) - ) - - def __lt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) < attrs_to_tuple(other) - - return NotImplemented - - def __le__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) <= attrs_to_tuple(other) - - return NotImplemented - - def __gt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) > attrs_to_tuple(other) - - return NotImplemented - - def __ge__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) >= attrs_to_tuple(other) - - return NotImplemented - - return __lt__, __le__, __gt__, __ge__ - - -def _add_eq(cls, attrs=None): - """ - Add equality methods to *cls* with *attrs*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__eq__ = _make_eq(cls, attrs) - cls.__ne__ = _make_ne() - - return cls - - -def _make_repr(attrs, ns, cls): - unique_filename = _generate_unique_filename(cls, "repr") - # Figure out which attributes to include, and which function to use to - # format them. The a.repr value can be either bool or a custom - # callable. - attr_names_with_reprs = tuple( - (a.name, (repr if a.repr is True else a.repr), a.init) - for a in attrs - if a.repr is not False - ) - globs = { - name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr - } - globs["_compat"] = _compat - globs["AttributeError"] = AttributeError - globs["NOTHING"] = NOTHING - attribute_fragments = [] - for name, r, i in attr_names_with_reprs: - accessor = ( - "self." 
+ name if i else 'getattr(self, "' + name + '", NOTHING)' - ) - fragment = ( - "%s={%s!r}" % (name, accessor) - if r == repr - else "%s={%s_repr(%s)}" % (name, name, accessor) - ) - attribute_fragments.append(fragment) - repr_fragment = ", ".join(attribute_fragments) - - if ns is None: - cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' - else: - cls_name_fragment = ns + ".{self.__class__.__name__}" - - lines = [ - "def __repr__(self):", - " try:", - " already_repring = _compat.repr_context.already_repring", - " except AttributeError:", - " already_repring = {id(self),}", - " _compat.repr_context.already_repring = already_repring", - " else:", - " if id(self) in already_repring:", - " return '...'", - " else:", - " already_repring.add(id(self))", - " try:", - f" return f'{cls_name_fragment}({repr_fragment})'", - " finally:", - " already_repring.remove(id(self))", - ] - - return _make_method( - "__repr__", "\n".join(lines), unique_filename, globs=globs - ) - - -def _add_repr(cls, ns=None, attrs=None): - """ - Add a repr method to *cls*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__repr__ = _make_repr(attrs, ns, cls) - return cls - - -def fields(cls): - """ - Return the tuple of *attrs* attributes for a class. - - The tuple also allows accessing the fields by their names (see below for - examples). - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - :rtype: tuple (with name accessors) of `attrs.Attribute` - - .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields - by name. - .. versionchanged:: 23.1.0 Add support for generic classes. - """ - generic_base = get_generic_base(cls) - - if generic_base is None and not isinstance(cls, type): - raise TypeError("Passed object must be a class.") - - attrs = getattr(cls, "__attrs_attrs__", None) - - if attrs is None: - if generic_base is not None: - attrs = getattr(generic_base, "__attrs_attrs__", None) - if attrs is not None: - # Even though this is global state, stick it on here to speed - # it up. We rely on `cls` being cached for this to be - # efficient. - cls.__attrs_attrs__ = attrs - return attrs - raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.") - - return attrs - - -def fields_dict(cls): - """ - Return an ordered dictionary of *attrs* attributes for a class, whose - keys are the attribute names. - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - :rtype: dict - - .. versionadded:: 18.1.0 - """ - if not isinstance(cls, type): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.") - return {a.name: a for a in attrs} - - -def validate(inst): - """ - Validate all attributes on *inst* that have a validator. - - Leaves all exceptions through. - - :param inst: Instance of a class with *attrs* attributes. - """ - if _config._run_validators is False: - return - - for a in fields(inst.__class__): - v = a.validator - if v is not None: - v(inst, a, getattr(inst, a.name)) - - -def _is_slot_cls(cls): - return "__slots__" in cls.__dict__ - - -def _is_slot_attr(a_name, base_attr_map): - """ - Check if the attribute name comes from a slot class. 
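The introspection helpers defined above (fields, fields_dict, validate) are part of the public API; a brief usage sketch with invented class names:

import attr

@attr.s
class Point:
    x = attr.ib(default=0)
    y = attr.ib(default=0)

fs = attr.fields(Point)
assert fs.x.name == "x" and fs[1].name == "y"      # tuple with name accessors
assert set(attr.fields_dict(Point)) == {"x", "y"}

@attr.s
class Positive:
    n = attr.ib(validator=attr.validators.gt(0))   # gt() assumes attrs >= 21.3

p = Positive(5)
attr.validate(p)  # re-runs all validators; raises if a field became invalid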
- """ - return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) - - -def _make_init( - cls, - attrs, - pre_init, - post_init, - frozen, - slots, - cache_hash, - base_attr_map, - is_exc, - cls_on_setattr, - attrs_init, -): - has_cls_on_setattr = ( - cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP - ) - - if frozen and has_cls_on_setattr: - raise ValueError("Frozen classes can't use on_setattr.") - - needs_cached_setattr = cache_hash or frozen - filtered_attrs = [] - attr_dict = {} - for a in attrs: - if not a.init and a.default is NOTHING: - continue - - filtered_attrs.append(a) - attr_dict[a.name] = a - - if a.on_setattr is not None: - if frozen is True: - raise ValueError("Frozen classes can't use on_setattr.") - - needs_cached_setattr = True - elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: - needs_cached_setattr = True - - unique_filename = _generate_unique_filename(cls, "init") - - script, globs, annotations = _attrs_to_init_script( - filtered_attrs, - frozen, - slots, - pre_init, - post_init, - cache_hash, - base_attr_map, - is_exc, - needs_cached_setattr, - has_cls_on_setattr, - attrs_init, - ) - if cls.__module__ in sys.modules: - # This makes typing.get_type_hints(CLS.__init__) resolve string types. - globs.update(sys.modules[cls.__module__].__dict__) - - globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) - - if needs_cached_setattr: - # Save the lookup overhead in __init__ if we need to circumvent - # setattr hooks. - globs["_cached_setattr_get"] = _obj_setattr.__get__ - - init = _make_method( - "__attrs_init__" if attrs_init else "__init__", - script, - unique_filename, - globs, - ) - init.__annotations__ = annotations - - return init - - -def _setattr(attr_name, value_var, has_on_setattr): - """ - Use the cached object.setattr to set *attr_name* to *value_var*. - """ - return f"_setattr('{attr_name}', {value_var})" - - -def _setattr_with_converter(attr_name, value_var, has_on_setattr): - """ - Use the cached object.setattr to set *attr_name* to *value_var*, but run - its converter first. - """ - return "_setattr('%s', %s(%s))" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - -def _assign(attr_name, value, has_on_setattr): - """ - Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise - relegate to _setattr. - """ - if has_on_setattr: - return _setattr(attr_name, value, True) - - return f"self.{attr_name} = {value}" - - -def _assign_with_converter(attr_name, value_var, has_on_setattr): - """ - Unless *attr_name* has an on_setattr hook, use normal assignment after - conversion. Otherwise relegate to _setattr_with_converter. - """ - if has_on_setattr: - return _setattr_with_converter(attr_name, value_var, True) - - return "self.%s = %s(%s)" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - -def _attrs_to_init_script( - attrs, - frozen, - slots, - pre_init, - post_init, - cache_hash, - base_attr_map, - is_exc, - needs_cached_setattr, - has_cls_on_setattr, - attrs_init, -): - """ - Return a script of an initializer for *attrs* and a dict of globals. - - The globals are expected by the generated script. - - If *frozen* is True, we cannot set the attributes directly so we use - a cached ``object.__setattr__``. - """ - lines = [] - if pre_init: - lines.append("self.__attrs_pre_init__()") - - if needs_cached_setattr: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup per - # assignment. 
- # Note _setattr will be used again below if cache_hash is True - "_setattr = _cached_setattr_get(self)" - ) - - if frozen is True: - if slots is True: - fmt_setter = _setattr - fmt_setter_with_converter = _setattr_with_converter - else: - # Dict frozen classes assign directly to __dict__. - # But only if the attribute doesn't come from an ancestor slot - # class. - # Note _inst_dict will be used again below if cache_hash is True - lines.append("_inst_dict = self.__dict__") - - def fmt_setter(attr_name, value_var, has_on_setattr): - if _is_slot_attr(attr_name, base_attr_map): - return _setattr(attr_name, value_var, has_on_setattr) - - return f"_inst_dict['{attr_name}'] = {value_var}" - - def fmt_setter_with_converter( - attr_name, value_var, has_on_setattr - ): - if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): - return _setattr_with_converter( - attr_name, value_var, has_on_setattr - ) - - return "_inst_dict['%s'] = %s(%s)" % ( - attr_name, - _init_converter_pat % (attr_name,), - value_var, - ) - - else: - # Not frozen. - fmt_setter = _assign - fmt_setter_with_converter = _assign_with_converter - - args = [] - kw_only_args = [] - attrs_to_validate = [] - - # This is a dictionary of names to validator and converter callables. - # Injecting this into __init__ globals lets us avoid lookups. - names_for_globals = {} - annotations = {"return": None} - - for a in attrs: - if a.validator: - attrs_to_validate.append(a) - - attr_name = a.name - has_on_setattr = a.on_setattr is not None or ( - a.on_setattr is not setters.NO_OP and has_cls_on_setattr - ) - # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not - # explicitly provided - arg_name = a.alias - - has_factory = isinstance(a.default, Factory) - if has_factory and a.default.takes_self: - maybe_self = "self" - else: - maybe_self = "" - - if a.init is False: - if has_factory: - init_factory_name = _init_factory_pat % (a.name,) - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - init_factory_name + f"({maybe_self})", - has_on_setattr, - ) - ) - conv_name = _init_converter_pat % (a.name,) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - init_factory_name + f"({maybe_self})", - has_on_setattr, - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - f"attr_dict['{attr_name}'].default", - has_on_setattr, - ) - ) - conv_name = _init_converter_pat % (a.name,) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - f"attr_dict['{attr_name}'].default", - has_on_setattr, - ) - ) - elif a.default is not NOTHING and not has_factory: - arg = f"{arg_name}=attr_dict['{attr_name}'].default" - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) - - elif has_factory: - arg = f"{arg_name}=NOTHING" - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - lines.append(f"if {arg_name} is not NOTHING:") - - init_factory_name = _init_factory_pat % (a.name,) - if a.converter is not None: - lines.append( - " " - + fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - lines.append("else:") - 
lines.append( - " " - + fmt_setter_with_converter( - attr_name, - init_factory_name + "(" + maybe_self + ")", - has_on_setattr, - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append( - " " + fmt_setter(attr_name, arg_name, has_on_setattr) - ) - lines.append("else:") - lines.append( - " " - + fmt_setter( - attr_name, - init_factory_name + "(" + maybe_self + ")", - has_on_setattr, - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.kw_only: - kw_only_args.append(arg_name) - else: - args.append(arg_name) - - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, arg_name, has_on_setattr - ) - ) - names_for_globals[ - _init_converter_pat % (a.name,) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) - - if a.init is True: - if a.type is not None and a.converter is None: - annotations[arg_name] = a.type - elif a.converter is not None: - # Try to get the type from the converter. - t = _AnnotationExtractor(a.converter).get_first_param_type() - if t: - annotations[arg_name] = t - - if attrs_to_validate: # we can skip this if there are no validators. - names_for_globals["_config"] = _config - lines.append("if _config._run_validators is True:") - for a in attrs_to_validate: - val_name = "__attr_validator_" + a.name - attr_name = "__attr_" + a.name - lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") - names_for_globals[val_name] = a.validator - names_for_globals[attr_name] = a - - if post_init: - lines.append("self.__attrs_post_init__()") - - # because this is set only after __attrs_post_init__ is called, a crash - # will result if post-init tries to access the hash code. This seemed - # preferable to setting this beforehand, in which case alteration to - # field values during post-init combined with post-init accessing the - # hash code would result in silent bugs. - if cache_hash: - if frozen: - if slots: - # if frozen and slots, then _setattr defined above - init_hash_cache = "_setattr('%s', %s)" - else: - # if frozen and not slots, then _inst_dict defined above - init_hash_cache = "_inst_dict['%s'] = %s" - else: - init_hash_cache = "self.%s = %s" - lines.append(init_hash_cache % (_hash_cache_field, "None")) - - # For exceptions we rely on BaseException.__init__ for proper - # initialization. - if is_exc: - vals = ",".join(f"self.{a.name}" for a in attrs if a.init) - - lines.append(f"BaseException.__init__(self, {vals})") - - args = ", ".join(args) - if kw_only_args: - args += "%s*, %s" % ( - ", " if args else "", # leading comma - ", ".join(kw_only_args), # kw_only args - ) - - return ( - "def %s(self, %s):\n %s\n" - % ( - ("__attrs_init__" if attrs_init else "__init__"), - args, - "\n ".join(lines) if lines else "pass", - ), - names_for_globals, - annotations, - ) - - -def _default_init_alias_for(name: str) -> str: - """ - The default __init__ parameter name for a field. - - This performs private-name adjustment via leading-unscore stripping, - and is the default value of Attribute.alias if not provided. - """ - - return name.lstrip("_") - - -class Attribute: - """ - *Read-only* representation of an attribute. - - .. warning:: - - You should never instantiate this class yourself. - - The class has *all* arguments of `attr.ib` (except for ``factory`` - which is only syntactic sugar for ``default=Factory(...)`` plus the - following: - - - ``name`` (`str`): The name of the attribute. 
- - ``alias`` (`str`): The __init__ parameter name of the attribute, after - any explicit overrides and default private-attribute-name handling. - - ``inherited`` (`bool`): Whether or not that attribute has been inherited - from a base class. - - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables - that are used for comparing and ordering objects by this attribute, - respectively. These are set by passing a callable to `attr.ib`'s ``eq``, - ``order``, or ``cmp`` arguments. See also :ref:`comparison customization - `. - - Instances of this class are frequently used for introspection purposes - like: - - - `fields` returns a tuple of them. - - Validators get them passed as the first argument. - - The :ref:`field transformer ` hook receives a list of - them. - - The ``alias`` property exposes the __init__ parameter name of the field, - with any overrides and default private-attribute handling applied. - - - .. versionadded:: 20.1.0 *inherited* - .. versionadded:: 20.1.0 *on_setattr* - .. versionchanged:: 20.2.0 *inherited* is not taken into account for - equality checks and hashing anymore. - .. versionadded:: 21.1.0 *eq_key* and *order_key* - .. versionadded:: 22.2.0 *alias* - - For the full version history of the fields, see `attr.ib`. - """ - - __slots__ = ( - "name", - "default", - "validator", - "repr", - "eq", - "eq_key", - "order", - "order_key", - "hash", - "init", - "metadata", - "type", - "converter", - "kw_only", - "inherited", - "on_setattr", - "alias", - ) - - def __init__( - self, - name, - default, - validator, - repr, - cmp, # XXX: unused, remove along with other cmp code. - hash, - init, - inherited, - metadata=None, - type=None, - converter=None, - kw_only=False, - eq=None, - eq_key=None, - order=None, - order_key=None, - on_setattr=None, - alias=None, - ): - eq, eq_key, order, order_key = _determine_attrib_eq_order( - cmp, eq_key or eq, order_key or order, True - ) - - # Cache this descriptor here to speed things up later. - bound_setattr = _obj_setattr.__get__(self) - - # Despite the big red warning, people *do* instantiate `Attribute` - # themselves. - bound_setattr("name", name) - bound_setattr("default", default) - bound_setattr("validator", validator) - bound_setattr("repr", repr) - bound_setattr("eq", eq) - bound_setattr("eq_key", eq_key) - bound_setattr("order", order) - bound_setattr("order_key", order_key) - bound_setattr("hash", hash) - bound_setattr("init", init) - bound_setattr("converter", converter) - bound_setattr( - "metadata", - ( - types.MappingProxyType(dict(metadata)) # Shallow copy - if metadata - else _empty_metadata_singleton - ), - ) - bound_setattr("type", type) - bound_setattr("kw_only", kw_only) - bound_setattr("inherited", inherited) - bound_setattr("on_setattr", on_setattr) - bound_setattr("alias", alias) - - def __setattr__(self, name, value): - raise FrozenInstanceError() - - @classmethod - def from_counting_attr(cls, name, ca, type=None): - # type holds the annotated value. 
deal with conflicts: - if type is None: - type = ca.type - elif ca.type is not None: - raise ValueError( - "Type annotation and type argument cannot both be present" - ) - inst_dict = { - k: getattr(ca, k) - for k in Attribute.__slots__ - if k - not in ( - "name", - "validator", - "default", - "type", - "inherited", - ) # exclude methods and deprecated alias - } - return cls( - name=name, - validator=ca._validator, - default=ca._default, - type=type, - cmp=None, - inherited=False, - **inst_dict, - ) - - # Don't use attrs.evolve since fields(Attribute) doesn't work - def evolve(self, **changes): - """ - Copy *self* and apply *changes*. - - This works similarly to `attrs.evolve` but that function does not work - with `Attribute`. - - It is mainly meant to be used for `transform-fields`. - - .. versionadded:: 20.3.0 - """ - new = copy.copy(self) - - new._setattrs(changes.items()) - - return new - - # Don't use _add_pickle since fields(Attribute) doesn't work - def __getstate__(self): - """ - Play nice with pickle. - """ - return tuple( - getattr(self, name) if name != "metadata" else dict(self.metadata) - for name in self.__slots__ - ) - - def __setstate__(self, state): - """ - Play nice with pickle. - """ - self._setattrs(zip(self.__slots__, state)) - - def _setattrs(self, name_values_pairs): - bound_setattr = _obj_setattr.__get__(self) - for name, value in name_values_pairs: - if name != "metadata": - bound_setattr(name, value) - else: - bound_setattr( - name, - types.MappingProxyType(dict(value)) - if value - else _empty_metadata_singleton, - ) - - -_a = [ - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - eq=True, - order=False, - hash=(name != "metadata"), - init=True, - inherited=False, - alias=_default_init_alias_for(name), - ) - for name in Attribute.__slots__ -] - -Attribute = _add_hash( - _add_eq( - _add_repr(Attribute, attrs=_a), - attrs=[a for a in _a if a.name != "inherited"], - ), - attrs=[a for a in _a if a.hash and a.name != "inherited"], -) - - -class _CountingAttr: - """ - Intermediate representation of attributes that uses a counter to preserve - the order in which the attributes have been defined. - - *Internal* data structure of the attrs library. Running into is most - likely the result of a bug like a forgotten `@attr.s` decorator. 
- """ - - __slots__ = ( - "counter", - "_default", - "repr", - "eq", - "eq_key", - "order", - "order_key", - "hash", - "init", - "metadata", - "_validator", - "converter", - "type", - "kw_only", - "on_setattr", - "alias", - ) - __attrs_attrs__ = tuple( - Attribute( - name=name, - alias=_default_init_alias_for(name), - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=True, - init=True, - kw_only=False, - eq=True, - eq_key=None, - order=False, - order_key=None, - inherited=False, - on_setattr=None, - ) - for name in ( - "counter", - "_default", - "repr", - "eq", - "order", - "hash", - "init", - "on_setattr", - "alias", - ) - ) + ( - Attribute( - name="metadata", - alias="metadata", - default=None, - validator=None, - repr=True, - cmp=None, - hash=False, - init=True, - kw_only=False, - eq=True, - eq_key=None, - order=False, - order_key=None, - inherited=False, - on_setattr=None, - ), - ) - cls_counter = 0 - - def __init__( - self, - default, - validator, - repr, - cmp, - hash, - init, - converter, - metadata, - type, - kw_only, - eq, - eq_key, - order, - order_key, - on_setattr, - alias, - ): - _CountingAttr.cls_counter += 1 - self.counter = _CountingAttr.cls_counter - self._default = default - self._validator = validator - self.converter = converter - self.repr = repr - self.eq = eq - self.eq_key = eq_key - self.order = order - self.order_key = order_key - self.hash = hash - self.init = init - self.metadata = metadata - self.type = type - self.kw_only = kw_only - self.on_setattr = on_setattr - self.alias = alias - - def validator(self, meth): - """ - Decorator that adds *meth* to the list of validators. - - Returns *meth* unchanged. - - .. versionadded:: 17.1.0 - """ - if self._validator is None: - self._validator = meth - else: - self._validator = and_(self._validator, meth) - return meth - - def default(self, meth): - """ - Decorator that allows to set the default for an attribute. - - Returns *meth* unchanged. - - :raises DefaultAlreadySetError: If default has been set before. - - .. versionadded:: 17.1.0 - """ - if self._default is not NOTHING: - raise DefaultAlreadySetError() - - self._default = Factory(meth, takes_self=True) - - return meth - - -_CountingAttr = _add_eq(_add_repr(_CountingAttr)) - - -class Factory: - """ - Stores a factory callable. - - If passed as the default value to `attrs.field`, the factory is used to - generate a new value. - - :param callable factory: A callable that takes either none or exactly one - mandatory positional argument depending on *takes_self*. - :param bool takes_self: Pass the partially initialized instance that is - being initialized as a positional argument. - - .. versionadded:: 17.1.0 *takes_self* - """ - - __slots__ = ("factory", "takes_self") - - def __init__(self, factory, takes_self=False): - self.factory = factory - self.takes_self = takes_self - - def __getstate__(self): - """ - Play nice with pickle. - """ - return tuple(getattr(self, name) for name in self.__slots__) - - def __setstate__(self, state): - """ - Play nice with pickle. 
- """ - for name, value in zip(self.__slots__, state): - setattr(self, name, value) - - -_f = [ - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - eq=True, - order=False, - hash=True, - init=True, - inherited=False, - ) - for name in Factory.__slots__ -] - -Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) - - -def make_class(name, attrs, bases=(object,), **attributes_arguments): - r""" - A quick way to create a new class called *name* with *attrs*. - - :param str name: The name for the new class. - - :param attrs: A list of names or a dictionary of mappings of names to - `attr.ib`\ s / `attrs.field`\ s. - - The order is deduced from the order of the names or attributes inside - *attrs*. Otherwise the order of the definition of the attributes is - used. - :type attrs: `list` or `dict` - - :param tuple bases: Classes that the new class will subclass. - - :param attributes_arguments: Passed unmodified to `attr.s`. - - :return: A new class with *attrs*. - :rtype: type - - .. versionadded:: 17.1.0 *bases* - .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. - """ - if isinstance(attrs, dict): - cls_dict = attrs - elif isinstance(attrs, (list, tuple)): - cls_dict = {a: attrib() for a in attrs} - else: - raise TypeError("attrs argument must be a dict or a list.") - - pre_init = cls_dict.pop("__attrs_pre_init__", None) - post_init = cls_dict.pop("__attrs_post_init__", None) - user_init = cls_dict.pop("__init__", None) - - body = {} - if pre_init is not None: - body["__attrs_pre_init__"] = pre_init - if post_init is not None: - body["__attrs_post_init__"] = post_init - if user_init is not None: - body["__init__"] = user_init - - type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) - - # For pickling to work, the __module__ variable needs to be set to the - # frame where the class is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example) or sys._getframe is not - # defined for arguments greater than 0 (IronPython). - try: - type_.__module__ = sys._getframe(1).f_globals.get( - "__name__", "__main__" - ) - except (AttributeError, ValueError): - pass - - # We do it here for proper warnings with meaningful stacklevel. - cmp = attributes_arguments.pop("cmp", None) - ( - attributes_arguments["eq"], - attributes_arguments["order"], - ) = _determine_attrs_eq_order( - cmp, - attributes_arguments.get("eq"), - attributes_arguments.get("order"), - True, - ) - - return _attrs(these=cls_dict, **attributes_arguments)(type_) - - -# These are required by within this module so we define them here and merely -# import into .validators / .converters. - - -@attrs(slots=True, hash=True) -class _AndValidator: - """ - Compose many validators to a single one. - """ - - _validators = attrib() - - def __call__(self, inst, attr, value): - for v in self._validators: - v(inst, attr, value) - - -def and_(*validators): - """ - A validator that composes multiple validators into one. - - When called on a value, it runs all wrapped validators. - - :param callables validators: Arbitrary number of validators. - - .. versionadded:: 17.1.0 - """ - vals = [] - for validator in validators: - vals.extend( - validator._validators - if isinstance(validator, _AndValidator) - else [validator] - ) - - return _AndValidator(tuple(vals)) - - -def pipe(*converters): - """ - A converter that composes multiple converters into one. 
- - When called on a value, it runs all wrapped converters, returning the - *last* value. - - Type annotations will be inferred from the wrapped converters', if - they have any. - - :param callables converters: Arbitrary number of converters. - - .. versionadded:: 20.1.0 - """ - - def pipe_converter(val): - for converter in converters: - val = converter(val) - - return val - - if not converters: - # If the converter list is empty, pipe_converter is the identity. - A = typing.TypeVar("A") - pipe_converter.__annotations__ = {"val": A, "return": A} - else: - # Get parameter type from first converter. - t = _AnnotationExtractor(converters[0]).get_first_param_type() - if t: - pipe_converter.__annotations__["val"] = t - - # Get return type from last converter. - rt = _AnnotationExtractor(converters[-1]).get_return_type() - if rt: - pipe_converter.__annotations__["return"] = rt - - return pipe_converter diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/relativedelta.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/relativedelta.py deleted file mode 100644 index a9e85f7e6cd7488e6b2f4b249d5cf6af314c3859..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/relativedelta.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -import datetime -import calendar - -import operator -from math import copysign - -from six import integer_types -from warnings import warn - -from ._common import weekday - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - - -class relativedelta(object): - """ - The relativedelta type is designed to be applied to an existing datetime and - can replace specific components of that datetime, or represents an interval - of time. - - It is based on the specification of the excellent work done by M.-A. Lemburg - in his - `mx.DateTime `_ extension. - However, notice that this type does *NOT* implement the same algorithm as - his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - - There are two different ways to build a relativedelta instance. The - first one is passing it two date/datetime classes:: - - relativedelta(datetime1, datetime2) - - The second one is passing it any number of the following keyword arguments:: - - relativedelta(arg1=x,arg2=y,arg3=z...) - - year, month, day, hour, minute, second, microsecond: - Absolute information (argument is singular); adding or subtracting a - relativedelta with absolute information does not perform an arithmetic - operation, but rather REPLACES the corresponding value in the - original datetime with the value(s) in relativedelta. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative (argument is plural); adding - or subtracting a relativedelta with relative information performs - the corresponding arithmetic operation on the original datetime value - with the information in the relativedelta. - - weekday: - One of the weekday instances (MO, TU, etc) available in the - relativedelta module. These instances may receive a parameter N, - specifying the Nth weekday, which could be positive or negative - (like MO(+1) or MO(-2)). Not specifying it is the same as specifying - +1. You can also use an integer, where 0=MO. This argument is always - relative e.g. 
if the calculated date is already Monday, using MO(1) - or MO(-1) won't change the day. To effectively make it absolute, use - it in combination with the day argument (e.g. day=1, MO(1) for first - Monday of the month). - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - - There are relative and absolute forms of the keyword - arguments. The plural is relative, and the singular is - absolute. For each argument in the order below, the absolute form - is applied first (by setting each attribute to that value) and - then the relative form (by adding the value to the attribute). - - The order of attributes considered when this relativedelta is - added to a datetime is: - - 1. Year - 2. Month - 3. Day - 4. Hours - 5. Minutes - 6. Seconds - 7. Microseconds - - Finally, weekday is applied, using the rule described above. - - For example - - >>> from datetime import datetime - >>> from dateutil.relativedelta import relativedelta, MO - >>> dt = datetime(2018, 4, 9, 13, 37, 0) - >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) - >>> dt + delta - datetime.datetime(2018, 4, 2, 14, 37) - - First, the day is set to 1 (the first of the month), then 25 hours - are added, to get to the 2nd day and 14th hour, finally the - weekday is applied, but since the 2nd is already a Monday there is - no effect. - - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - - if dt1 and dt2: - # datetime is a subclass of date. 
So both must be date - if not (isinstance(dt1, datetime.date) and - isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - - # We allow two dates, or two datetimes, so we coerce them to be - # of the same type - if (isinstance(dt1, datetime.datetime) != - isinstance(dt2, datetime.datetime)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - # Get year / month delta between the two - months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) - self._set_months(months) - - # Remove the year/month delta so the timedelta is just well-defined - # time units (seconds, days and microseconds) - dtm = self.__radd__(dt2) - - # If we've overshot our target, make an adjustment - if dt1 < dt2: - compare = operator.gt - increment = 1 - else: - compare = operator.lt - increment = -1 - - while compare(dt1, dtm): - months += increment - self._set_months(months) - dtm = self.__radd__(dt2) - - # Get the timedelta between the "months-adjusted" date and dt1 - delta = dt1 - dtm - self.seconds = delta.seconds + delta.days * 86400 - self.microseconds = delta.microseconds - else: - # Check for non-integer values in integer-only quantities - if any(x is not None and x != int(x) for x in (years, months)): - raise ValueError("Non-integer years and months are " - "ambiguous and not currently supported.") - - # Relative information - self.years = int(years) - self.months = int(months) - self.days = days + weeks * 7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - - # Absolute information - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if any(x is not None and int(x) != x - for x in (year, month, day, hour, - minute, second, microsecond)): - # For now we'll deprecate floats - later it'll be an error. - warn("Non-integer value passed as absolute information. 
" + - "This is not a well-defined condition and will raise " + - "errors in future versions.", DeprecationWarning) - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, - 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = _sign(self.microseconds) - div, mod = divmod(self.microseconds * s, 1000000) - self.microseconds = mod * s - self.seconds += div * s - if abs(self.seconds) > 59: - s = _sign(self.seconds) - div, mod = divmod(self.seconds * s, 60) - self.seconds = mod * s - self.minutes += div * s - if abs(self.minutes) > 59: - s = _sign(self.minutes) - div, mod = divmod(self.minutes * s, 60) - self.minutes = mod * s - self.hours += div * s - if abs(self.hours) > 23: - s = _sign(self.hours) - div, mod = divmod(self.hours * s, 24) - self.hours = mod * s - self.days += div * s - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years += div * s - if (self.hours or self.minutes or self.seconds or self.microseconds - or self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - @property - def weeks(self): - return int(self.days / 7.0) - - @weeks.setter - def weeks(self, value): - self.days = self.days - (self.weeks * 7) + value * 7 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years = div * s - else: - self.years = 0 - - def normalized(self): - """ - Return a version of this object represented entirely using integer - values for the relative attributes. - - >>> relativedelta(days=1.5, hours=2).normalized() - relativedelta(days=+1, hours=+14) - - :return: - Returns a :class:`dateutil.relativedelta.relativedelta` object. 
- """ - # Cascade remainders down (rounding each to roughly nearest microsecond) - days = int(self.days) - - hours_f = round(self.hours + 24 * (self.days - days), 11) - hours = int(hours_f) - - minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) - minutes = int(minutes_f) - - seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) - seconds = int(seconds_f) - - microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) - - # Constructor carries overflow back up with call to _fix() - return self.__class__(years=self.years, months=self.months, - days=days, hours=hours, minutes=minutes, - seconds=seconds, microseconds=microseconds, - leapdays=self.leapdays, year=self.year, - month=self.month, day=self.day, - weekday=self.weekday, hour=self.hour, - minute=self.minute, second=self.second, - microsecond=self.microsecond) - - def __add__(self, other): - if isinstance(other, relativedelta): - return self.__class__(years=other.years + self.years, - months=other.months + self.months, - days=other.days + self.days, - hours=other.hours + self.hours, - minutes=other.minutes + self.minutes, - seconds=other.seconds + self.seconds, - microseconds=(other.microseconds + - self.microseconds), - leapdays=other.leapdays or self.leapdays, - year=(other.year if other.year is not None - else self.year), - month=(other.month if other.month is not None - else self.month), - day=(other.day if other.day is not None - else self.day), - weekday=(other.weekday if other.weekday is not None - else self.weekday), - hour=(other.hour if other.hour is not None - else self.hour), - minute=(other.minute if other.minute is not None - else self.minute), - second=(other.second if other.second is not None - else self.second), - microsecond=(other.microsecond if other.microsecond - is not None else - self.microsecond)) - if isinstance(other, datetime.timedelta): - return self.__class__(years=self.years, - months=self.months, - days=self.days + other.days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds + other.seconds, - microseconds=self.microseconds + other.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - if not isinstance(other, datetime.date): - return NotImplemented - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth) - 1) * 7 - if nth > 0: - jumpdays += (7 - ret.weekday() + weekday) % 7 - else: - jumpdays += (ret.weekday() - weekday) % 7 - jumpdays *= -1 - ret += 
datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented # In case the other object defines __rsub__ - return self.__class__(years=self.years - other.years, - months=self.months - other.months, - days=self.days - other.days, - hours=self.hours - other.hours, - minutes=self.minutes - other.minutes, - seconds=self.seconds - other.seconds, - microseconds=self.microseconds - other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=(self.year if self.year is not None - else other.year), - month=(self.month if self.month is not None else - other.month), - day=(self.day if self.day is not None else - other.day), - weekday=(self.weekday if self.weekday is not None else - other.weekday), - hour=(self.hour if self.hour is not None else - other.hour), - minute=(self.minute if self.minute is not None else - other.minute), - second=(self.second if self.second is not None else - other.second), - microsecond=(self.microsecond if self.microsecond - is not None else - other.microsecond)) - - def __abs__(self): - return self.__class__(years=abs(self.years), - months=abs(self.months), - days=abs(self.days), - hours=abs(self.hours), - minutes=abs(self.minutes), - seconds=abs(self.seconds), - microseconds=abs(self.microseconds), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __neg__(self): - return self.__class__(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - # Compatibility with Python 2.x - __nonzero__ = __bool__ - - def __mul__(self, other): - try: - f = float(other) - except TypeError: - return NotImplemented - - return self.__class__(years=int(self.years * f), - months=int(self.months * f), - days=int(self.days * f), - hours=int(self.hours * f), - minutes=int(self.minutes * f), - seconds=int(self.seconds * f), - microseconds=int(self.microseconds * f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - 
self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.microseconds == other.microseconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __hash__(self): - return hash(( - self.weekday, - self.years, - self.months, - self.days, - self.hours, - self.minutes, - self.seconds, - self.microseconds, - self.leapdays, - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - )) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - try: - reciprocal = 1 / float(other) - except TypeError: - return NotImplemented - - return self.__mul__(reciprocal) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("{attr}={value:+g}".format(attr=attr, value=value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("{attr}={value}".format(attr=attr, value=repr(value))) - return "{classname}({attrs})".format(classname=self.__class__.__name__, - attrs=", ".join(l)) - - -def _sign(x): - return int(copysign(1, x)) - -# vim:ts=4:sw=4:et diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ufoLib/kerning.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ufoLib/kerning.py deleted file mode 100644 index 8a1dca5b680fdd02d1e6ef5797e33e617005c254..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ufoLib/kerning.py +++ /dev/null @@ -1,91 +0,0 @@ -def lookupKerningValue( - pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None -): - """ - Note: This expects kerning to be a flat dictionary - of kerning pairs, not the nested structure used - in kerning.plist. - - >>> groups = { - ... "public.kern1.O" : ["O", "D", "Q"], - ... "public.kern2.E" : ["E", "F"] - ... } - >>> kerning = { - ... ("public.kern1.O", "public.kern2.E") : -100, - ... ("public.kern1.O", "F") : -200, - ... ("D", "F") : -300 - ... } - >>> lookupKerningValue(("D", "F"), kerning, groups) - -300 - >>> lookupKerningValue(("O", "F"), kerning, groups) - -200 - >>> lookupKerningValue(("O", "E"), kerning, groups) - -100 - >>> lookupKerningValue(("O", "O"), kerning, groups) - 0 - >>> lookupKerningValue(("E", "E"), kerning, groups) - 0 - >>> lookupKerningValue(("E", "O"), kerning, groups) - 0 - >>> lookupKerningValue(("X", "X"), kerning, groups) - 0 - >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"), - ... 
kerning, groups) - -100 - >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups) - -200 - >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups) - -100 - >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups) - 0 - """ - # quickly check to see if the pair is in the kerning dictionary - if pair in kerning: - return kerning[pair] - # create glyph to group mapping - if glyphToFirstGroup is not None: - assert glyphToSecondGroup is not None - if glyphToSecondGroup is not None: - assert glyphToFirstGroup is not None - if glyphToFirstGroup is None: - glyphToFirstGroup = {} - glyphToSecondGroup = {} - for group, groupMembers in groups.items(): - if group.startswith("public.kern1."): - for glyph in groupMembers: - glyphToFirstGroup[glyph] = group - elif group.startswith("public.kern2."): - for glyph in groupMembers: - glyphToSecondGroup[glyph] = group - # get group names and make sure first and second are glyph names - first, second = pair - firstGroup = secondGroup = None - if first.startswith("public.kern1."): - firstGroup = first - first = None - else: - firstGroup = glyphToFirstGroup.get(first) - if second.startswith("public.kern2."): - secondGroup = second - second = None - else: - secondGroup = glyphToSecondGroup.get(second) - # make an ordered list of pairs to look up - pairs = [ - (first, second), - (first, secondGroup), - (firstGroup, second), - (firstGroup, secondGroup), - ] - # look up the pairs and return any matches - for pair in pairs: - if pair in kerning: - return kerning[pair] - # use the fallback value - return fallback - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py deleted file mode 100644 index c602eba4bb286f833d081e30b6b8dfabcfe1c1e6..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -Tests for numpy/core/src/multiarray/conversion_utils.c -""" -import re -import sys - -import pytest - -import numpy as np -import numpy.core._multiarray_tests as mt -from numpy.testing import assert_warns, IS_PYPY - - -class StringConverterTestCase: - allow_bytes = True - case_insensitive = True - exact_match = False - warn = True - - def _check_value_error(self, val): - pattern = r'\(got {}\)'.format(re.escape(repr(val))) - with pytest.raises(ValueError, match=pattern) as exc: - self.conv(val) - - def _check_conv_assert_warn(self, val, expected): - if self.warn: - with assert_warns(DeprecationWarning) as exc: - assert self.conv(val) == expected - else: - assert self.conv(val) == expected - - def _check(self, val, expected): - """Takes valid non-deprecated inputs for converters, - runs converters on inputs, checks correctness of outputs, - warnings and errors""" - assert self.conv(val) == expected - - if self.allow_bytes: - assert self.conv(val.encode('ascii')) == expected - else: - with pytest.raises(TypeError): - self.conv(val.encode('ascii')) - - if len(val) != 1: - if self.exact_match: - self._check_value_error(val[:1]) - self._check_value_error(val + '\0') - else: - self._check_conv_assert_warn(val[:1], expected) - - if self.case_insensitive: - if val != val.lower(): - self._check_conv_assert_warn(val.lower(), expected) - if val != val.upper(): - 
self._check_conv_assert_warn(val.upper(), expected) - else: - if val != val.lower(): - self._check_value_error(val.lower()) - if val != val.upper(): - self._check_value_error(val.upper()) - - def test_wrong_type(self): - # common cases which apply to all the below - with pytest.raises(TypeError): - self.conv({}) - with pytest.raises(TypeError): - self.conv([]) - - def test_wrong_value(self): - # nonsense strings - self._check_value_error('') - self._check_value_error('\N{greek small letter pi}') - - if self.allow_bytes: - self._check_value_error(b'') - # bytes which can't be converted to strings via utf8 - self._check_value_error(b"\xFF") - if self.exact_match: - self._check_value_error("there's no way this is supported") - - -class TestByteorderConverter(StringConverterTestCase): - """ Tests of PyArray_ByteorderConverter """ - conv = mt.run_byteorder_converter - warn = False - - def test_valid(self): - for s in ['big', '>']: - self._check(s, 'NPY_BIG') - for s in ['little', '<']: - self._check(s, 'NPY_LITTLE') - for s in ['native', '=']: - self._check(s, 'NPY_NATIVE') - for s in ['ignore', '|']: - self._check(s, 'NPY_IGNORE') - for s in ['swap']: - self._check(s, 'NPY_SWAP') - - -class TestSortkindConverter(StringConverterTestCase): - """ Tests of PyArray_SortkindConverter """ - conv = mt.run_sortkind_converter - warn = False - - def test_valid(self): - self._check('quicksort', 'NPY_QUICKSORT') - self._check('heapsort', 'NPY_HEAPSORT') - self._check('mergesort', 'NPY_STABLESORT') # alias - self._check('stable', 'NPY_STABLESORT') - - -class TestSelectkindConverter(StringConverterTestCase): - """ Tests of PyArray_SelectkindConverter """ - conv = mt.run_selectkind_converter - case_insensitive = False - exact_match = True - - def test_valid(self): - self._check('introselect', 'NPY_INTROSELECT') - - -class TestSearchsideConverter(StringConverterTestCase): - """ Tests of PyArray_SearchsideConverter """ - conv = mt.run_searchside_converter - def test_valid(self): - self._check('left', 'NPY_SEARCHLEFT') - self._check('right', 'NPY_SEARCHRIGHT') - - -class TestOrderConverter(StringConverterTestCase): - """ Tests of PyArray_OrderConverter """ - conv = mt.run_order_converter - warn = False - - def test_valid(self): - self._check('c', 'NPY_CORDER') - self._check('f', 'NPY_FORTRANORDER') - self._check('a', 'NPY_ANYORDER') - self._check('k', 'NPY_KEEPORDER') - - def test_flatten_invalid_order(self): - # invalid after gh-14596 - with pytest.raises(ValueError): - self.conv('Z') - for order in [False, True, 0, 8]: - with pytest.raises(TypeError): - self.conv(order) - - -class TestClipmodeConverter(StringConverterTestCase): - """ Tests of PyArray_ClipmodeConverter """ - conv = mt.run_clipmode_converter - def test_valid(self): - self._check('clip', 'NPY_CLIP') - self._check('wrap', 'NPY_WRAP') - self._check('raise', 'NPY_RAISE') - - # integer values allowed here - assert self.conv(np.CLIP) == 'NPY_CLIP' - assert self.conv(np.WRAP) == 'NPY_WRAP' - assert self.conv(np.RAISE) == 'NPY_RAISE' - - -class TestCastingConverter(StringConverterTestCase): - """ Tests of PyArray_CastingConverter """ - conv = mt.run_casting_converter - case_insensitive = False - exact_match = True - - def test_valid(self): - self._check("no", "NPY_NO_CASTING") - self._check("equiv", "NPY_EQUIV_CASTING") - self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") - self._check("unsafe", "NPY_UNSAFE_CASTING") - - -class TestIntpConverter: - """ Tests of PyArray_IntpConverter """ - conv = 
mt.run_intp_converter - - def test_basic(self): - assert self.conv(1) == (1,) - assert self.conv((1, 2)) == (1, 2) - assert self.conv([1, 2]) == (1, 2) - assert self.conv(()) == () - - def test_none(self): - # once the warning expires, this will raise TypeError - with pytest.warns(DeprecationWarning): - assert self.conv(None) == () - - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") - def test_float(self): - with pytest.raises(TypeError): - self.conv(1.0) - with pytest.raises(TypeError): - self.conv([1, 1.0]) - - def test_too_large(self): - with pytest.raises(ValueError): - self.conv(2**64) - - def test_too_many_dims(self): - assert self.conv([1]*32) == (1,)*32 - with pytest.raises(ValueError): - self.conv([1]*33) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py deleted file mode 100644 index bb4ad563b2a5398039e7e569408545c3e1d2fa4e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/category.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/category.py deleted file mode 100644 index e189d9216d5e3d2c12ed3becdf21c08b9cfe1ae4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/indexes/category.py +++ /dev/null @@ -1,522 +0,0 @@ -from __future__ import annotations - -from typing import ( - TYPE_CHECKING, - Any, - Literal, - cast, -) - -import numpy as np - -from pandas._libs import index as libindex -from pandas.util._decorators import ( - cache_readonly, - doc, -) - -from pandas.core.dtypes.common import is_scalar -from pandas.core.dtypes.concat import concat_compat -from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.missing import ( - is_valid_na_for_dtype, - isna, - notna, -) - -from pandas.core.arrays.categorical import ( - Categorical, - contains, -) -from pandas.core.construction import extract_array -from pandas.core.indexes.base import ( - Index, - maybe_extract_name, -) -from pandas.core.indexes.extension import ( - NDArrayBackedExtensionIndex, - inherit_names, -) - -from pandas.io.formats.printing import pprint_thing - -if TYPE_CHECKING: - from collections.abc import Hashable - - from pandas._typing import ( - Dtype, - DtypeObj, - npt, - ) - - -@inherit_names( - [ - "argsort", - "tolist", - "codes", - "categories", - "ordered", - "_reverse_indexer", - "searchsorted", - "min", - "max", - 
], - Categorical, -) -@inherit_names( - [ - "rename_categories", - "reorder_categories", - "add_categories", - "remove_categories", - "remove_unused_categories", - "set_categories", - "as_ordered", - "as_unordered", - ], - Categorical, - wrap=True, -) -class CategoricalIndex(NDArrayBackedExtensionIndex): - """ - Index based on an underlying :class:`Categorical`. - - CategoricalIndex, like Categorical, can only take on a limited, - and usually fixed, number of possible values (`categories`). Also, - like Categorical, it might have an order, but numerical operations - (additions, divisions, ...) are not possible. - - Parameters - ---------- - data : array-like (1-dimensional) - The values of the categorical. If `categories` are given, values not in - `categories` will be replaced with NaN. - categories : index-like, optional - The categories for the categorical. Items need to be unique. - If the categories are not given here (and also not in `dtype`), they - will be inferred from the `data`. - ordered : bool, optional - Whether or not this categorical is treated as an ordered - categorical. If not given here or in `dtype`, the resulting - categorical will be unordered. - dtype : CategoricalDtype or "category", optional - If :class:`CategoricalDtype`, cannot be used together with - `categories` or `ordered`. - copy : bool, default False - Make a copy of input ndarray. - name : object, optional - Name to be stored in the index. - - Attributes - ---------- - codes - categories - ordered - - Methods - ------- - rename_categories - reorder_categories - add_categories - remove_categories - remove_unused_categories - set_categories - as_ordered - as_unordered - map - - Raises - ------ - ValueError - If the categories do not validate. - TypeError - If an explicit ``ordered=True`` is given but no `categories` and the - `values` are not sortable. - - See Also - -------- - Index : The base pandas Index type. - Categorical : A categorical array. - CategoricalDtype : Type for categorical data. - - Notes - ----- - See the `user guide - `__ - for more. - - Examples - -------- - >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) - CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], - categories=['a', 'b', 'c'], ordered=False, dtype='category') - - ``CategoricalIndex`` can also be instantiated from a ``Categorical``: - - >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"]) - >>> pd.CategoricalIndex(c) - CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], - categories=['a', 'b', 'c'], ordered=False, dtype='category') - - Ordered ``CategoricalIndex`` can have a min and max value. - - >>> ci = pd.CategoricalIndex( - ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"] - ... ) - >>> ci - CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], - categories=['c', 'b', 'a'], ordered=True, dtype='category') - >>> ci.min() - 'c' - """ - - _typ = "categoricalindex" - _data_cls = Categorical - - @property - def _can_hold_strings(self): - return self.categories._can_hold_strings - - @cache_readonly - def _should_fallback_to_positional(self) -> bool: - return self.categories._should_fallback_to_positional - - codes: np.ndarray - categories: Index - ordered: bool | None - _data: Categorical - _values: Categorical - - @property - def _engine_type(self) -> type[libindex.IndexEngine]: - # self.codes can have dtype int8, int16, int32 or int64, so we need - # to return the corresponding engine type (libindex.Int8Engine, etc.). 
- return { - np.int8: libindex.Int8Engine, - np.int16: libindex.Int16Engine, - np.int32: libindex.Int32Engine, - np.int64: libindex.Int64Engine, - }[self.codes.dtype.type] - - # -------------------------------------------------------------------- - # Constructors - - def __new__( - cls, - data=None, - categories=None, - ordered=None, - dtype: Dtype | None = None, - copy: bool = False, - name: Hashable | None = None, - ) -> CategoricalIndex: - name = maybe_extract_name(name, data, cls) - - if is_scalar(data): - # GH#38944 include None here, which pre-2.0 subbed in [] - cls._raise_scalar_data_error(data) - - data = Categorical( - data, categories=categories, ordered=ordered, dtype=dtype, copy=copy - ) - - return cls._simple_new(data, name=name) - - # -------------------------------------------------------------------- - - def _is_dtype_compat(self, other: Index) -> Categorical: - """ - *this is an internal non-public method* - - provide a comparison between the dtype of self and other (coercing if - needed) - - Parameters - ---------- - other : Index - - Returns - ------- - Categorical - - Raises - ------ - TypeError if the dtypes are not compatible - """ - if isinstance(other.dtype, CategoricalDtype): - cat = extract_array(other) - cat = cast(Categorical, cat) - if not cat._categories_match_up_to_permutation(self._values): - raise TypeError( - "categories must match existing categories when appending" - ) - - elif other._is_multi: - # preempt raising NotImplementedError in isna call - raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex") - else: - values = other - - cat = Categorical(other, dtype=self.dtype) - other = CategoricalIndex(cat) - if not other.isin(values).all(): - raise TypeError( - "cannot append a non-category item to a CategoricalIndex" - ) - cat = other._values - - if not ((cat == values) | (isna(cat) & isna(values))).all(): - # GH#37667 see test_equals_non_category - raise TypeError( - "categories must match existing categories when appending" - ) - - return cat - - def equals(self, other: object) -> bool: - """ - Determine if two CategoricalIndex objects contain the same elements. - - Returns - ------- - bool - ``True`` if two :class:`pandas.CategoricalIndex` objects have equal - elements, ``False`` otherwise. - - Examples - -------- - >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) - >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])) - >>> ci.equals(ci2) - True - - The order of elements matters. - - >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c']) - >>> ci.equals(ci3) - False - - The orderedness also matters. - - >>> ci4 = ci.as_ordered() - >>> ci.equals(ci4) - False - - The categories matter, but the order of the categories matters only when - ``ordered=True``. - - >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd']) - >>> ci.equals(ci5) - False - - >>> ci6 = ci.set_categories(['b', 'c', 'a']) - >>> ci.equals(ci6) - True - >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], - ... 
ordered=True) - >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a']) - >>> ci_ordered.equals(ci2_ordered) - False - """ - if self.is_(other): - return True - - if not isinstance(other, Index): - return False - - try: - other = self._is_dtype_compat(other) - except (TypeError, ValueError): - return False - - return self._data.equals(other) - - # -------------------------------------------------------------------- - # Rendering Methods - - @property - def _formatter_func(self): - return self.categories._formatter_func - - def _format_attrs(self): - """ - Return a list of tuples of the (attr,formatted_value) - """ - attrs: list[tuple[str, str | int | bool | None]] - - attrs = [ - ( - "categories", - f"[{', '.join(self._data._repr_categories())}]", - ), - ("ordered", self.ordered), - ] - extra = super()._format_attrs() - return attrs + extra - - def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: - result = [ - pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep - for x in self._values - ] - return header + result - - # -------------------------------------------------------------------- - - @property - def inferred_type(self) -> str: - return "categorical" - - @doc(Index.__contains__) - def __contains__(self, key: Any) -> bool: - # if key is a NaN, check if any NaN is in self. - if is_valid_na_for_dtype(key, self.categories.dtype): - return self.hasnans - - return contains(self, key, container=self._engine) - - def reindex( - self, target, method=None, level=None, limit: int | None = None, tolerance=None - ) -> tuple[Index, npt.NDArray[np.intp] | None]: - """ - Create index with target's values (move/add/delete values as necessary) - - Returns - ------- - new_index : pd.Index - Resulting index - indexer : np.ndarray[np.intp] or None - Indices of output values in original index - - """ - if method is not None: - raise NotImplementedError( - "argument method is not implemented for CategoricalIndex.reindex" - ) - if level is not None: - raise NotImplementedError( - "argument level is not implemented for CategoricalIndex.reindex" - ) - if limit is not None: - raise NotImplementedError( - "argument limit is not implemented for CategoricalIndex.reindex" - ) - return super().reindex(target) - - # -------------------------------------------------------------------- - # Indexing Methods - - def _maybe_cast_indexer(self, key) -> int: - # GH#41933: we have to do this instead of self._data._validate_scalar - # because this will correctly get partial-indexing on Interval categories - try: - return self._data._unbox_scalar(key) - except KeyError: - if is_valid_na_for_dtype(key, self.categories.dtype): - return -1 - raise - - def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex: - if isinstance(values, CategoricalIndex): - values = values._data - if isinstance(values, Categorical): - # Indexing on codes is more efficient if categories are the same, - # so we can apply some optimizations based on the degree of - # dtype-matching. 
- cat = self._data._encode_with_my_categories(values) - codes = cat._codes - else: - codes = self.categories.get_indexer(values) - codes = codes.astype(self.codes.dtype, copy=False) - cat = self._data._from_backing_data(codes) - return type(self)._simple_new(cat) - - # -------------------------------------------------------------------- - - def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: - return self.categories._is_comparable_dtype(dtype) - - def map(self, mapper, na_action: Literal["ignore"] | None = None): - """ - Map values using input an input mapping or function. - - Maps the values (their categories, not the codes) of the index to new - categories. If the mapping correspondence is one-to-one the result is a - :class:`~pandas.CategoricalIndex` which has the same order property as - the original, otherwise an :class:`~pandas.Index` is returned. - - If a `dict` or :class:`~pandas.Series` is used any unmapped category is - mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` - will be returned. - - Parameters - ---------- - mapper : function, dict, or Series - Mapping correspondence. - - Returns - ------- - pandas.CategoricalIndex or pandas.Index - Mapped index. - - See Also - -------- - Index.map : Apply a mapping correspondence on an - :class:`~pandas.Index`. - Series.map : Apply a mapping correspondence on a - :class:`~pandas.Series`. - Series.apply : Apply more complex functions on a - :class:`~pandas.Series`. - - Examples - -------- - >>> idx = pd.CategoricalIndex(['a', 'b', 'c']) - >>> idx - CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], - ordered=False, dtype='category') - >>> idx.map(lambda x: x.upper()) - CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'], - ordered=False, dtype='category') - >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'}) - CategoricalIndex(['first', 'second', 'third'], categories=['first', - 'second', 'third'], ordered=False, dtype='category') - - If the mapping is one-to-one the ordering of the categories is - preserved: - - >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True) - >>> idx - CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], - ordered=True, dtype='category') - >>> idx.map({'a': 3, 'b': 2, 'c': 1}) - CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True, - dtype='category') - - If the mapping is not one-to-one an :class:`~pandas.Index` is returned: - - >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'}) - Index(['first', 'second', 'first'], dtype='object') - - If a `dict` is used, all unmapped categories are mapped to `NaN` and - the result is an :class:`~pandas.Index`: - - >>> idx.map({'a': 'first', 'b': 'second'}) - Index(['first', 'second', nan], dtype='object') - """ - mapped = self._values.map(mapper, na_action=na_action) - return Index(mapped, name=self.name) - - def _concat(self, to_concat: list[Index], name: Hashable) -> Index: - # if calling index is category, don't check dtype of others - try: - cat = Categorical._concat_same_type( - [self._is_dtype_compat(c) for c in to_concat] - ) - except TypeError: - # not all to_concat elements are among our categories (or NA) - - res = concat_compat([x._values for x in to_concat]) - return Index(res, name=name) - else: - return type(self)._simple_new(cat, name=name) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py deleted file mode 100644 index 
1b2204f59f2ce4d9c8f2cca85326e4d81f8805bb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/padding.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union - -if TYPE_CHECKING: - from .console import ( - Console, - ConsoleOptions, - RenderableType, - RenderResult, - ) -from .jupyter import JupyterMixin -from .measure import Measurement -from .style import Style -from .segment import Segment - - -PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]] - - -class Padding(JupyterMixin): - """Draw space around content. - - Example: - >>> print(Padding("Hello", (2, 4), style="on blue")) - - Args: - renderable (RenderableType): String or other renderable. - pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders. - May be specified with 1, 2, or 4 integers (CSS style). - style (Union[str, Style], optional): Style for padding characters. Defaults to "none". - expand (bool, optional): Expand padding to fit available width. Defaults to True. - """ - - def __init__( - self, - renderable: "RenderableType", - pad: "PaddingDimensions" = (0, 0, 0, 0), - *, - style: Union[str, Style] = "none", - expand: bool = True, - ): - self.renderable = renderable - self.top, self.right, self.bottom, self.left = self.unpack(pad) - self.style = style - self.expand = expand - - @classmethod - def indent(cls, renderable: "RenderableType", level: int) -> "Padding": - """Make padding instance to render an indent. - - Args: - renderable (RenderableType): String or other renderable. - level (int): Number of characters to indent. - - Returns: - Padding: A Padding instance. - """ - - return Padding(renderable, pad=(0, 0, 0, level), expand=False) - - @staticmethod - def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]: - """Unpack padding specified in CSS style.""" - if isinstance(pad, int): - return (pad, pad, pad, pad) - if len(pad) == 1: - _pad = pad[0] - return (_pad, _pad, _pad, _pad) - if len(pad) == 2: - pad_top, pad_right = cast(Tuple[int, int], pad) - return (pad_top, pad_right, pad_top, pad_right) - if len(pad) == 4: - top, right, bottom, left = cast(Tuple[int, int, int, int], pad) - return (top, right, bottom, left) - raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given") - - def __repr__(self) -> str: - return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))" - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - style = console.get_style(self.style) - if self.expand: - width = options.max_width - else: - width = min( - Measurement.get(console, options, self.renderable).maximum - + self.left - + self.right, - options.max_width, - ) - render_options = options.update_width(width - self.left - self.right) - if render_options.height is not None: - render_options = render_options.update_height( - height=render_options.height - self.top - self.bottom - ) - lines = console.render_lines( - self.renderable, render_options, style=style, pad=True - ) - _Segment = Segment - - left = _Segment(" " * self.left, style) if self.left else None - right = ( - [_Segment(f'{" " * self.right}', style), _Segment.line()] - if self.right - else [_Segment.line()] - ) - blank_line: Optional[List[Segment]] = None - if self.top: - blank_line = [_Segment(f'{" " * width}\n', style)] - yield from blank_line * self.top - if left: - for line in lines: - 
yield left - yield from line - yield from right - else: - for line in lines: - yield from line - yield from right - if self.bottom: - blank_line = blank_line or [_Segment(f'{" " * width}\n', style)] - yield from blank_line * self.bottom - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - max_width = options.max_width - extra_width = self.left + self.right - if max_width - extra_width < 1: - return Measurement(max_width, max_width) - measure_min, measure_max = Measurement.get(console, options, self.renderable) - measurement = Measurement(measure_min + extra_width, measure_max + extra_width) - measurement = measurement.with_maximum(max_width) - return measurement - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich import print - - print(Padding("Hello, World", (2, 4), style="on blue")) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/legacy/server.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/legacy/server.py deleted file mode 100644 index 25d5a71444f1f009a72ee7d79f4f3bd398a4e64e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/legacy/server.py +++ /dev/null @@ -1,1196 +0,0 @@ -from __future__ import annotations - -import asyncio -import email.utils -import functools -import http -import inspect -import logging -import socket -import warnings -from types import TracebackType -from typing import ( - Any, - Awaitable, - Callable, - Generator, - Iterable, - List, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, - cast, -) - -from ..datastructures import Headers, HeadersLike, MultipleValuesError -from ..exceptions import ( - AbortHandshake, - InvalidHandshake, - InvalidHeader, - InvalidMessage, - InvalidOrigin, - InvalidUpgrade, - NegotiationError, -) -from ..extensions import Extension, ServerExtensionFactory -from ..extensions.permessage_deflate import enable_server_permessage_deflate -from ..headers import ( - build_extension, - parse_extension, - parse_subprotocol, - validate_subprotocols, -) -from ..http import USER_AGENT -from ..protocol import State -from ..typing import ExtensionHeader, LoggerLike, Origin, Subprotocol -from .compatibility import asyncio_timeout, loop_if_py_lt_38 -from .handshake import build_response, check_request -from .http import read_request -from .protocol import WebSocketCommonProtocol - - -__all__ = ["serve", "unix_serve", "WebSocketServerProtocol", "WebSocketServer"] - - -HeadersLikeOrCallable = Union[HeadersLike, Callable[[str, Headers], HeadersLike]] - -HTTPResponse = Tuple[http.HTTPStatus, HeadersLike, bytes] - - -class WebSocketServerProtocol(WebSocketCommonProtocol): - """ - WebSocket server connection. - - :class:`WebSocketServerProtocol` provides :meth:`recv` and :meth:`send` - coroutines for receiving and sending messages. - - It supports asynchronous iteration to receive messages:: - - async for message in websocket: - await process(message) - - The iterator exits normally when the connection is closed with close code - 1000 (OK) or 1001 (going away) or without a close code. It raises - a :exc:`~websockets.exceptions.ConnectionClosedError` when the connection - is closed with any other code. - - You may customize the opening handshake in a subclass by - overriding :meth:`process_request` or :meth:`select_subprotocol`. - - Args: - ws_server: WebSocket server that created this connection. 
- - See :func:`serve` for the documentation of ``ws_handler``, ``logger``, ``origins``, - ``extensions``, ``subprotocols``, ``extra_headers``, and ``server_header``. - - See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the - documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``, - ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``. - - """ - - is_client = False - side = "server" - - def __init__( - self, - ws_handler: Union[ - Callable[[WebSocketServerProtocol], Awaitable[Any]], - Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated - ], - ws_server: WebSocketServer, - *, - logger: Optional[LoggerLike] = None, - origins: Optional[Sequence[Optional[Origin]]] = None, - extensions: Optional[Sequence[ServerExtensionFactory]] = None, - subprotocols: Optional[Sequence[Subprotocol]] = None, - extra_headers: Optional[HeadersLikeOrCallable] = None, - server_header: Optional[str] = USER_AGENT, - process_request: Optional[ - Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]] - ] = None, - select_subprotocol: Optional[ - Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol] - ] = None, - open_timeout: Optional[float] = 10, - **kwargs: Any, - ) -> None: - if logger is None: - logger = logging.getLogger("websockets.server") - super().__init__(logger=logger, **kwargs) - # For backwards compatibility with 6.0 or earlier. - if origins is not None and "" in origins: - warnings.warn("use None instead of '' in origins", DeprecationWarning) - origins = [None if origin == "" else origin for origin in origins] - # For backwards compatibility with 10.0 or earlier. Done here in - # addition to serve to trigger the deprecation warning on direct - # use of WebSocketServerProtocol. - self.ws_handler = remove_path_argument(ws_handler) - self.ws_server = ws_server - self.origins = origins - self.available_extensions = extensions - self.available_subprotocols = subprotocols - self.extra_headers = extra_headers - self.server_header = server_header - self._process_request = process_request - self._select_subprotocol = select_subprotocol - self.open_timeout = open_timeout - - def connection_made(self, transport: asyncio.BaseTransport) -> None: - """ - Register connection and initialize a task to handle it. - - """ - super().connection_made(transport) - # Register the connection with the server before creating the handler - # task. Registering at the beginning of the handler coroutine would - # create a race condition between the creation of the task, which - # schedules its execution, and the moment the handler starts running. - self.ws_server.register(self) - self.handler_task = self.loop.create_task(self.handler()) - - async def handler(self) -> None: - """ - Handle the lifecycle of a WebSocket connection. - - Since this method doesn't have a caller able to handle exceptions, it - attempts to log relevant ones and guarantees that the TCP connection is - closed before exiting. - - """ - try: - try: - async with asyncio_timeout(self.open_timeout): - await self.handshake( - origins=self.origins, - available_extensions=self.available_extensions, - available_subprotocols=self.available_subprotocols, - extra_headers=self.extra_headers, - ) - # Remove this branch when dropping support for Python < 3.8 - # because CancelledError no longer inherits Exception. 
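# (For context: on Python >= 3.8, asyncio.CancelledError derives from
#  BaseException rather than Exception, so the broad ``except Exception``
#  branch further down would not swallow a cancellation even without this
#  explicit re-raise.)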
- except asyncio.CancelledError: # pragma: no cover - raise - except asyncio.TimeoutError: # pragma: no cover - raise - except ConnectionError: - raise - except Exception as exc: - if isinstance(exc, AbortHandshake): - status, headers, body = exc.status, exc.headers, exc.body - elif isinstance(exc, InvalidOrigin): - if self.debug: - self.logger.debug("! invalid origin", exc_info=True) - status, headers, body = ( - http.HTTPStatus.FORBIDDEN, - Headers(), - f"Failed to open a WebSocket connection: {exc}.\n".encode(), - ) - elif isinstance(exc, InvalidUpgrade): - if self.debug: - self.logger.debug("! invalid upgrade", exc_info=True) - status, headers, body = ( - http.HTTPStatus.UPGRADE_REQUIRED, - Headers([("Upgrade", "websocket")]), - ( - f"Failed to open a WebSocket connection: {exc}.\n" - f"\n" - f"You cannot access a WebSocket server directly " - f"with a browser. You need a WebSocket client.\n" - ).encode(), - ) - elif isinstance(exc, InvalidHandshake): - if self.debug: - self.logger.debug("! invalid handshake", exc_info=True) - status, headers, body = ( - http.HTTPStatus.BAD_REQUEST, - Headers(), - f"Failed to open a WebSocket connection: {exc}.\n".encode(), - ) - else: - self.logger.error("opening handshake failed", exc_info=True) - status, headers, body = ( - http.HTTPStatus.INTERNAL_SERVER_ERROR, - Headers(), - ( - b"Failed to open a WebSocket connection.\n" - b"See server log for more information.\n" - ), - ) - - headers.setdefault("Date", email.utils.formatdate(usegmt=True)) - if self.server_header is not None: - headers.setdefault("Server", self.server_header) - - headers.setdefault("Content-Length", str(len(body))) - headers.setdefault("Content-Type", "text/plain") - headers.setdefault("Connection", "close") - - self.write_http_response(status, headers, body) - self.logger.info( - "connection failed (%d %s)", status.value, status.phrase - ) - await self.close_transport() - return - - try: - await self.ws_handler(self) - except Exception: - self.logger.error("connection handler failed", exc_info=True) - if not self.closed: - self.fail_connection(1011) - raise - - try: - await self.close() - except ConnectionError: - raise - except Exception: - self.logger.error("closing handshake failed", exc_info=True) - raise - - except Exception: - # Last-ditch attempt to avoid leaking connections on errors. - try: - self.transport.close() - except Exception: # pragma: no cover - pass - - finally: - # Unregister the connection with the server when the handler task - # terminates. Registration is tied to the lifecycle of the handler - # task because the server waits for tasks attached to registered - # connections before terminating. - self.ws_server.unregister(self) - self.logger.info("connection closed") - - async def read_http_request(self) -> Tuple[str, Headers]: - """ - Read request line and headers from the HTTP request. - - If the request contains a body, it may be read from ``self.reader`` - after this coroutine returns. - - Raises: - InvalidMessage: if the HTTP message is malformed or isn't an - HTTP/1.1 GET request. 
- - """ - try: - path, headers = await read_request(self.reader) - except asyncio.CancelledError: # pragma: no cover - raise - except Exception as exc: - raise InvalidMessage("did not receive a valid HTTP request") from exc - - if self.debug: - self.logger.debug("< GET %s HTTP/1.1", path) - for key, value in headers.raw_items(): - self.logger.debug("< %s: %s", key, value) - - self.path = path - self.request_headers = headers - - return path, headers - - def write_http_response( - self, status: http.HTTPStatus, headers: Headers, body: Optional[bytes] = None - ) -> None: - """ - Write status line and headers to the HTTP response. - - This coroutine is also able to write a response body. - - """ - self.response_headers = headers - - if self.debug: - self.logger.debug("> HTTP/1.1 %d %s", status.value, status.phrase) - for key, value in headers.raw_items(): - self.logger.debug("> %s: %s", key, value) - if body is not None: - self.logger.debug("> [body] (%d bytes)", len(body)) - - # Since the status line and headers only contain ASCII characters, - # we can keep this simple. - response = f"HTTP/1.1 {status.value} {status.phrase}\r\n" - response += str(headers) - - self.transport.write(response.encode()) - - if body is not None: - self.transport.write(body) - - async def process_request( - self, path: str, request_headers: Headers - ) -> Optional[HTTPResponse]: - """ - Intercept the HTTP request and return an HTTP response if appropriate. - - You may override this method in a :class:`WebSocketServerProtocol` - subclass, for example: - - * to return an HTTP 200 OK response on a given path; then a load - balancer can use this path for a health check; - * to authenticate the request and return an HTTP 401 Unauthorized or an - HTTP 403 Forbidden when authentication fails. - - You may also override this method with the ``process_request`` - argument of :func:`serve` and :class:`WebSocketServerProtocol`. This - is equivalent, except ``process_request`` won't have access to the - protocol instance, so it can't store information for later use. - - :meth:`process_request` is expected to complete quickly. If it may run - for a long time, then it should await :meth:`wait_closed` and exit if - :meth:`wait_closed` completes, or else it could prevent the server - from shutting down. - - Args: - path: request path, including optional query string. - request_headers: request headers. - - Returns: - Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]: :obj:`None` - to continue the WebSocket handshake normally. - - An HTTP response, represented by a 3-uple of the response status, - headers, and body, to abort the WebSocket handshake and return - that HTTP response instead. - - """ - if self._process_request is not None: - response = self._process_request(path, request_headers) - if isinstance(response, Awaitable): - return await response - else: - # For backwards compatibility with 7.0. - warnings.warn( - "declare process_request as a coroutine", DeprecationWarning - ) - return response - return None - - @staticmethod - def process_origin( - headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None - ) -> Optional[Origin]: - """ - Handle the Origin HTTP request header. - - Args: - headers: request headers. - origins: optional list of acceptable origins. - - Raises: - InvalidOrigin: if the origin isn't acceptable. - - """ - # "The user agent MUST NOT include more than one Origin header field" - # per https://www.rfc-editor.org/rfc/rfc6454.html#section-7.3. 
- try: - origin = cast(Optional[Origin], headers.get("Origin")) - except MultipleValuesError as exc: - raise InvalidHeader("Origin", "more than one Origin header found") from exc - if origins is not None: - if origin not in origins: - raise InvalidOrigin(origin) - return origin - - @staticmethod - def process_extensions( - headers: Headers, - available_extensions: Optional[Sequence[ServerExtensionFactory]], - ) -> Tuple[Optional[str], List[Extension]]: - """ - Handle the Sec-WebSocket-Extensions HTTP request header. - - Accept or reject each extension proposed in the client request. - Negotiate parameters for accepted extensions. - - Return the Sec-WebSocket-Extensions HTTP response header and the list - of accepted extensions. - - :rfc:`6455` leaves the rules up to the specification of each - :extension. - - To provide this level of flexibility, for each extension proposed by - the client, we check for a match with each extension available in the - server configuration. If no match is found, the extension is ignored. - - If several variants of the same extension are proposed by the client, - it may be accepted several times, which won't make sense in general. - Extensions must implement their own requirements. For this purpose, - the list of previously accepted extensions is provided. - - This process doesn't allow the server to reorder extensions. It can - only select a subset of the extensions proposed by the client. - - Other requirements, for example related to mandatory extensions or the - order of extensions, may be implemented by overriding this method. - - Args: - headers: request headers. - extensions: optional list of supported extensions. - - Raises: - InvalidHandshake: to abort the handshake with an HTTP 400 error. - - """ - response_header_value: Optional[str] = None - - extension_headers: List[ExtensionHeader] = [] - accepted_extensions: List[Extension] = [] - - header_values = headers.get_all("Sec-WebSocket-Extensions") - - if header_values and available_extensions: - parsed_header_values: List[ExtensionHeader] = sum( - [parse_extension(header_value) for header_value in header_values], [] - ) - - for name, request_params in parsed_header_values: - for ext_factory in available_extensions: - # Skip non-matching extensions based on their name. - if ext_factory.name != name: - continue - - # Skip non-matching extensions based on their params. - try: - response_params, extension = ext_factory.process_request_params( - request_params, accepted_extensions - ) - except NegotiationError: - continue - - # Add matching extension to the final list. - extension_headers.append((name, response_params)) - accepted_extensions.append(extension) - - # Break out of the loop once we have a match. - break - - # If we didn't break from the loop, no extension in our list - # matched what the client sent. The extension is declined. - - # Serialize extension header. - if extension_headers: - response_header_value = build_extension(extension_headers) - - return response_header_value, accepted_extensions - - # Not @staticmethod because it calls self.select_subprotocol() - def process_subprotocol( - self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]] - ) -> Optional[Subprotocol]: - """ - Handle the Sec-WebSocket-Protocol HTTP request header. - - Return Sec-WebSocket-Protocol HTTP response header, which is the same - as the selected subprotocol. - - Args: - headers: request headers. - available_subprotocols: optional list of supported subprotocols. 
- - Raises: - InvalidHandshake: to abort the handshake with an HTTP 400 error. - - """ - subprotocol: Optional[Subprotocol] = None - - header_values = headers.get_all("Sec-WebSocket-Protocol") - - if header_values and available_subprotocols: - parsed_header_values: List[Subprotocol] = sum( - [parse_subprotocol(header_value) for header_value in header_values], [] - ) - - subprotocol = self.select_subprotocol( - parsed_header_values, available_subprotocols - ) - - return subprotocol - - def select_subprotocol( - self, - client_subprotocols: Sequence[Subprotocol], - server_subprotocols: Sequence[Subprotocol], - ) -> Optional[Subprotocol]: - """ - Pick a subprotocol among those supported by the client and the server. - - If several subprotocols are available, select the preferred subprotocol - by giving equal weight to the preferences of the client and the server. - - If no subprotocol is available, proceed without a subprotocol. - - You may provide a ``select_subprotocol`` argument to :func:`serve` or - :class:`WebSocketServerProtocol` to override this logic. For example, - you could reject the handshake if the client doesn't support a - particular subprotocol, rather than accept the handshake without that - subprotocol. - - Args: - client_subprotocols: list of subprotocols offered by the client. - server_subprotocols: list of subprotocols available on the server. - - Returns: - Optional[Subprotocol]: Selected subprotocol, if a common subprotocol - was found. - - :obj:`None` to continue without a subprotocol. - - """ - if self._select_subprotocol is not None: - return self._select_subprotocol(client_subprotocols, server_subprotocols) - - subprotocols = set(client_subprotocols) & set(server_subprotocols) - if not subprotocols: - return None - return sorted( - subprotocols, - key=lambda p: client_subprotocols.index(p) + server_subprotocols.index(p), - )[0] - - async def handshake( - self, - origins: Optional[Sequence[Optional[Origin]]] = None, - available_extensions: Optional[Sequence[ServerExtensionFactory]] = None, - available_subprotocols: Optional[Sequence[Subprotocol]] = None, - extra_headers: Optional[HeadersLikeOrCallable] = None, - ) -> str: - """ - Perform the server side of the opening handshake. - - Args: - origins: list of acceptable values of the Origin HTTP header; - include :obj:`None` if the lack of an origin is acceptable. - extensions: list of supported extensions, in order in which they - should be tried. - subprotocols: list of supported subprotocols, in order of - decreasing preference. - extra_headers: arbitrary HTTP headers to add to the response when - the handshake succeeds. - - Returns: - str: path of the URI of the request. - - Raises: - InvalidHandshake: if the handshake fails. - - """ - path, request_headers = await self.read_http_request() - - # Hook for customizing request handling, for example checking - # authentication or treating some paths as plain HTTP endpoints. - early_response_awaitable = self.process_request(path, request_headers) - if isinstance(early_response_awaitable, Awaitable): - early_response = await early_response_awaitable - else: - # For backwards compatibility with 7.0. - warnings.warn("declare process_request as a coroutine", DeprecationWarning) - early_response = early_response_awaitable - - # The connection may drop while process_request is running. - if self.state is State.CLOSED: - # This subclass of ConnectionError is silently ignored in handler(). 
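# (Illustrative check: BrokenPipeError is a built-in subclass of ConnectionError,
#  so handler() re-raises it instead of writing an HTTP error response:
#    >>> issubclass(BrokenPipeError, ConnectionError)
#    True
# )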
- raise BrokenPipeError("connection closed during opening handshake") - - # Change the response to a 503 error if the server is shutting down. - if not self.ws_server.is_serving(): - early_response = ( - http.HTTPStatus.SERVICE_UNAVAILABLE, - [], - b"Server is shutting down.\n", - ) - - if early_response is not None: - raise AbortHandshake(*early_response) - - key = check_request(request_headers) - - self.origin = self.process_origin(request_headers, origins) - - extensions_header, self.extensions = self.process_extensions( - request_headers, available_extensions - ) - - protocol_header = self.subprotocol = self.process_subprotocol( - request_headers, available_subprotocols - ) - - response_headers = Headers() - - build_response(response_headers, key) - - if extensions_header is not None: - response_headers["Sec-WebSocket-Extensions"] = extensions_header - - if protocol_header is not None: - response_headers["Sec-WebSocket-Protocol"] = protocol_header - - if callable(extra_headers): - extra_headers = extra_headers(path, self.request_headers) - if extra_headers is not None: - response_headers.update(extra_headers) - - response_headers.setdefault("Date", email.utils.formatdate(usegmt=True)) - if self.server_header is not None: - response_headers.setdefault("Server", self.server_header) - - self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers) - - self.logger.info("connection open") - - self.connection_open() - - return path - - -class WebSocketServer: - """ - WebSocket server returned by :func:`serve`. - - This class provides the same interface as :class:`~asyncio.Server`, - notably the :meth:`~asyncio.Server.close` - and :meth:`~asyncio.Server.wait_closed` methods. - - It keeps track of WebSocket connections in order to close them properly - when shutting down. - - Args: - logger: Logger for this server. - It defaults to ``logging.getLogger("websockets.server")``. - See the :doc:`logging guide <../../topics/logging>` for details. - - """ - - def __init__(self, logger: Optional[LoggerLike] = None): - if logger is None: - logger = logging.getLogger("websockets.server") - self.logger = logger - - # Keep track of active connections. - self.websockets: Set[WebSocketServerProtocol] = set() - - # Task responsible for closing the server and terminating connections. - self.close_task: Optional[asyncio.Task[None]] = None - - # Completed when the server is closed and connections are terminated. - self.closed_waiter: asyncio.Future[None] - - def wrap(self, server: asyncio.base_events.Server) -> None: - """ - Attach to a given :class:`~asyncio.Server`. - - Since :meth:`~asyncio.loop.create_server` doesn't support injecting a - custom ``Server`` class, the easiest solution that doesn't rely on - private :mod:`asyncio` APIs is to: - - - instantiate a :class:`WebSocketServer` - - give the protocol factory a reference to that instance - - call :meth:`~asyncio.loop.create_server` with the factory - - attach the resulting :class:`~asyncio.Server` with this method - - """ - self.server = server - for sock in server.sockets: - if sock.family == socket.AF_INET: - name = "%s:%d" % sock.getsockname() - elif sock.family == socket.AF_INET6: - name = "[%s]:%d" % sock.getsockname()[:2] - elif sock.family == socket.AF_UNIX: - name = sock.getsockname() - # In the unlikely event that someone runs websockets over a - # protocol other than IP or Unix sockets, avoid crashing. 
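# (For illustration, hypothetical socket names formatted by the branches above:
#   AF_INET  ("127.0.0.1", 8765)     -> "127.0.0.1:8765"
#   AF_INET6 ("::1", 8765, 0, 0)     -> "[::1]:8765"
#   AF_UNIX  "/tmp/websockets.sock"  -> "/tmp/websockets.sock")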
- else: # pragma: no cover - name = str(sock.getsockname()) - self.logger.info("server listening on %s", name) - - # Initialized here because we need a reference to the event loop. - # This should be moved back to __init__ when dropping Python < 3.10. - self.closed_waiter = server.get_loop().create_future() - - def register(self, protocol: WebSocketServerProtocol) -> None: - """ - Register a connection with this server. - - """ - self.websockets.add(protocol) - - def unregister(self, protocol: WebSocketServerProtocol) -> None: - """ - Unregister a connection with this server. - - """ - self.websockets.remove(protocol) - - def close(self, close_connections: bool = True) -> None: - """ - Close the server. - - * Close the underlying :class:`~asyncio.Server`. - * When ``close_connections`` is :obj:`True`, which is the default, - close existing connections. Specifically: - - * Reject opening WebSocket connections with an HTTP 503 (service - unavailable) error. This happens when the server accepted the TCP - connection but didn't complete the opening handshake before closing. - * Close open WebSocket connections with close code 1001 (going away). - - * Wait until all connection handlers terminate. - - :meth:`close` is idempotent. - - """ - if self.close_task is None: - self.close_task = self.get_loop().create_task( - self._close(close_connections) - ) - - async def _close(self, close_connections: bool) -> None: - """ - Implementation of :meth:`close`. - - This calls :meth:`~asyncio.Server.close` on the underlying - :class:`~asyncio.Server` object to stop accepting new connections and - then closes open connections with close code 1001. - - """ - self.logger.info("server closing") - - # Stop accepting new connections. - self.server.close() - - # Wait until self.server.close() completes. - await self.server.wait_closed() - - # Wait until all accepted connections reach connection_made() and call - # register(). See https://bugs.python.org/issue34852 for details. - await asyncio.sleep(0, **loop_if_py_lt_38(self.get_loop())) - - if close_connections: - # Close OPEN connections with status code 1001. Since the server was - # closed, handshake() closes OPENING connections with an HTTP 503 - # error. Wait until all connections are closed. - - close_tasks = [ - asyncio.create_task(websocket.close(1001)) - for websocket in self.websockets - if websocket.state is not State.CONNECTING - ] - # asyncio.wait doesn't accept an empty first argument. - if close_tasks: - await asyncio.wait( - close_tasks, - **loop_if_py_lt_38(self.get_loop()), - ) - - # Wait until all connection handlers are complete. - - # asyncio.wait doesn't accept an empty first argument. - if self.websockets: - await asyncio.wait( - [websocket.handler_task for websocket in self.websockets], - **loop_if_py_lt_38(self.get_loop()), - ) - - # Tell wait_closed() to return. - self.closed_waiter.set_result(None) - - self.logger.info("server closed") - - async def wait_closed(self) -> None: - """ - Wait until the server is closed. - - When :meth:`wait_closed` returns, all TCP connections are closed and - all connection handlers have returned. - - To ensure a fast shutdown, a connection handler should always be - awaiting at least one of: - - * :meth:`~WebSocketServerProtocol.recv`: when the connection is closed, - it raises :exc:`~websockets.exceptions.ConnectionClosedOK`; - * :meth:`~WebSocketServerProtocol.wait_closed`: when the connection is - closed, it returns. 
- - Then the connection handler is immediately notified of the shutdown; - it can clean up and exit. - - """ - await asyncio.shield(self.closed_waiter) - - def get_loop(self) -> asyncio.AbstractEventLoop: - """ - See :meth:`asyncio.Server.get_loop`. - - """ - return self.server.get_loop() - - def is_serving(self) -> bool: - """ - See :meth:`asyncio.Server.is_serving`. - - """ - return self.server.is_serving() - - async def start_serving(self) -> None: # pragma: no cover - """ - See :meth:`asyncio.Server.start_serving`. - - Typical use:: - - server = await serve(..., start_serving=False) - # perform additional setup here... - # ... then start the server - await server.start_serving() - - """ - await self.server.start_serving() - - async def serve_forever(self) -> None: # pragma: no cover - """ - See :meth:`asyncio.Server.serve_forever`. - - Typical use:: - - server = await serve(...) - # this coroutine doesn't return - # canceling it stops the server - await server.serve_forever() - - This is an alternative to using :func:`serve` as an asynchronous context - manager. Shutdown is triggered by canceling :meth:`serve_forever` - instead of exiting a :func:`serve` context. - - """ - await self.server.serve_forever() - - @property - def sockets(self) -> Iterable[socket.socket]: - """ - See :attr:`asyncio.Server.sockets`. - - """ - return self.server.sockets - - async def __aenter__(self) -> WebSocketServer: # pragma: no cover - return self - - async def __aexit__( - self, - exc_type: Optional[Type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> None: # pragma: no cover - self.close() - await self.wait_closed() - - -class Serve: - """ - Start a WebSocket server listening on ``host`` and ``port``. - - Whenever a client connects, the server creates a - :class:`WebSocketServerProtocol`, performs the opening handshake, and - delegates to the connection handler, ``ws_handler``. - - The handler receives the :class:`WebSocketServerProtocol` and uses it to - send and receive messages. - - Once the handler completes, either normally or with an exception, the - server performs the closing handshake and closes the connection. - - Awaiting :func:`serve` yields a :class:`WebSocketServer`. This object - provides a :meth:`~WebSocketServer.close` method to shut down the server:: - - stop = asyncio.Future() # set this future to exit the server - - server = await serve(...) - await stop - await server.close() - - :func:`serve` can be used as an asynchronous context manager. Then, the - server is shut down automatically when exiting the context:: - - stop = asyncio.Future() # set this future to exit the server - - async with serve(...): - await stop - - Args: - ws_handler: Connection handler. It receives the WebSocket connection, - which is a :class:`WebSocketServerProtocol`, in argument. - host: Network interfaces the server binds to. - See :meth:`~asyncio.loop.create_server` for details. - port: TCP port the server listens on. - See :meth:`~asyncio.loop.create_server` for details. - create_protocol: Factory for the :class:`asyncio.Protocol` managing - the connection. It defaults to :class:`WebSocketServerProtocol`. - Set it to a wrapper or a subclass to customize connection handling. - logger: Logger for this server. - It defaults to ``logging.getLogger("websockets.server")``. - See the :doc:`logging guide <../../topics/logging>` for details. - compression: The "permessage-deflate" extension is enabled by default. 
- Set ``compression`` to :obj:`None` to disable it. See the - :doc:`compression guide <../../topics/compression>` for details. - origins: Acceptable values of the ``Origin`` header, for defending - against Cross-Site WebSocket Hijacking attacks. Include :obj:`None` - in the list if the lack of an origin is acceptable. - extensions: List of supported extensions, in order in which they - should be negotiated and run. - subprotocols: List of supported subprotocols, in order of decreasing - preference. - extra_headers (Union[HeadersLike, Callable[[str, Headers], HeadersLike]]): - Arbitrary HTTP headers to add to the response. This can be - a :data:`~websockets.datastructures.HeadersLike` or a callable - taking the request path and headers in arguments and returning - a :data:`~websockets.datastructures.HeadersLike`. - server_header: Value of the ``Server`` response header. - It defaults to ``"Python/x.y.z websockets/X.Y"``. - Setting it to :obj:`None` removes the header. - process_request (Optional[Callable[[str, Headers], \ - Awaitable[Optional[Tuple[http.HTTPStatus, HeadersLike, bytes]]]]]): - Intercept HTTP request before the opening handshake. - See :meth:`~WebSocketServerProtocol.process_request` for details. - select_subprotocol: Select a subprotocol supported by the client. - See :meth:`~WebSocketServerProtocol.select_subprotocol` for details. - open_timeout: Timeout for opening connections in seconds. - :obj:`None` disables the timeout. - - See :class:`~websockets.legacy.protocol.WebSocketCommonProtocol` for the - documentation of ``ping_interval``, ``ping_timeout``, ``close_timeout``, - ``max_size``, ``max_queue``, ``read_limit``, and ``write_limit``. - - Any other keyword arguments are passed the event loop's - :meth:`~asyncio.loop.create_server` method. - - For example: - - * You can set ``ssl`` to a :class:`~ssl.SSLContext` to enable TLS. - - * You can set ``sock`` to a :obj:`~socket.socket` that you created - outside of websockets. - - Returns: - WebSocketServer: WebSocket server. - - """ - - def __init__( - self, - ws_handler: Union[ - Callable[[WebSocketServerProtocol], Awaitable[Any]], - Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated - ], - host: Optional[Union[str, Sequence[str]]] = None, - port: Optional[int] = None, - *, - create_protocol: Optional[Callable[..., WebSocketServerProtocol]] = None, - logger: Optional[LoggerLike] = None, - compression: Optional[str] = "deflate", - origins: Optional[Sequence[Optional[Origin]]] = None, - extensions: Optional[Sequence[ServerExtensionFactory]] = None, - subprotocols: Optional[Sequence[Subprotocol]] = None, - extra_headers: Optional[HeadersLikeOrCallable] = None, - server_header: Optional[str] = USER_AGENT, - process_request: Optional[ - Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]] - ] = None, - select_subprotocol: Optional[ - Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol] - ] = None, - open_timeout: Optional[float] = 10, - ping_interval: Optional[float] = 20, - ping_timeout: Optional[float] = 20, - close_timeout: Optional[float] = None, - max_size: Optional[int] = 2**20, - max_queue: Optional[int] = 2**5, - read_limit: int = 2**16, - write_limit: int = 2**16, - **kwargs: Any, - ) -> None: - # Backwards compatibility: close_timeout used to be called timeout. 
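# Sketch of the resulting precedence (hypothetical calls):
#   serve(handler, timeout=5)                   -> close_timeout = 5, DeprecationWarning
#   serve(handler, timeout=5, close_timeout=7)  -> close_timeout = 7, timeout ignored
#   serve(handler)                              -> close_timeout = 10 (legacy default)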
- timeout: Optional[float] = kwargs.pop("timeout", None) - if timeout is None: - timeout = 10 - else: - warnings.warn("rename timeout to close_timeout", DeprecationWarning) - # If both are specified, timeout is ignored. - if close_timeout is None: - close_timeout = timeout - - # Backwards compatibility: create_protocol used to be called klass. - klass: Optional[Type[WebSocketServerProtocol]] = kwargs.pop("klass", None) - if klass is None: - klass = WebSocketServerProtocol - else: - warnings.warn("rename klass to create_protocol", DeprecationWarning) - # If both are specified, klass is ignored. - if create_protocol is None: - create_protocol = klass - - # Backwards compatibility: recv() used to return None on closed connections - legacy_recv: bool = kwargs.pop("legacy_recv", False) - - # Backwards compatibility: the loop parameter used to be supported. - _loop: Optional[asyncio.AbstractEventLoop] = kwargs.pop("loop", None) - if _loop is None: - loop = asyncio.get_event_loop() - else: - loop = _loop - warnings.warn("remove loop argument", DeprecationWarning) - - ws_server = WebSocketServer(logger=logger) - - secure = kwargs.get("ssl") is not None - - if compression == "deflate": - extensions = enable_server_permessage_deflate(extensions) - elif compression is not None: - raise ValueError(f"unsupported compression: {compression}") - - if subprotocols is not None: - validate_subprotocols(subprotocols) - - factory = functools.partial( - create_protocol, - # For backwards compatibility with 10.0 or earlier. Done here in - # addition to WebSocketServerProtocol to trigger the deprecation - # warning once per serve() call rather than once per connection. - remove_path_argument(ws_handler), - ws_server, - host=host, - port=port, - secure=secure, - open_timeout=open_timeout, - ping_interval=ping_interval, - ping_timeout=ping_timeout, - close_timeout=close_timeout, - max_size=max_size, - max_queue=max_queue, - read_limit=read_limit, - write_limit=write_limit, - loop=_loop, - legacy_recv=legacy_recv, - origins=origins, - extensions=extensions, - subprotocols=subprotocols, - extra_headers=extra_headers, - server_header=server_header, - process_request=process_request, - select_subprotocol=select_subprotocol, - logger=logger, - ) - - if kwargs.pop("unix", False): - path: Optional[str] = kwargs.pop("path", None) - # unix_serve(path) must not specify host and port parameters. - assert host is None and port is None - create_server = functools.partial( - loop.create_unix_server, factory, path, **kwargs - ) - else: - create_server = functools.partial( - loop.create_server, factory, host, port, **kwargs - ) - - # This is a coroutine function. - self._create_server = create_server - self.ws_server = ws_server - - # async with serve(...) - - async def __aenter__(self) -> WebSocketServer: - return await self - - async def __aexit__( - self, - exc_type: Optional[Type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> None: - self.ws_server.close() - await self.ws_server.wait_closed() - - # await serve(...) - - def __await__(self) -> Generator[Any, None, WebSocketServer]: - # Create a suitable iterator by calling __await__ on a coroutine. - return self.__await_impl__().__await__() - - async def __await_impl__(self) -> WebSocketServer: - server = await self._create_server() - self.ws_server.wrap(server) - return self.ws_server - - # yield from serve(...) 
- remove when dropping Python < 3.10 - - __iter__ = __await__ - - -serve = Serve - - -def unix_serve( - ws_handler: Union[ - Callable[[WebSocketServerProtocol], Awaitable[Any]], - Callable[[WebSocketServerProtocol, str], Awaitable[Any]], # deprecated - ], - path: Optional[str] = None, - **kwargs: Any, -) -> Serve: - """ - Start a WebSocket server listening on a Unix socket. - - This function is identical to :func:`serve`, except the ``host`` and - ``port`` arguments are replaced by ``path``. It is only available on Unix. - - Unrecognized keyword arguments are passed the event loop's - :meth:`~asyncio.loop.create_unix_server` method. - - It's useful for deploying a server behind a reverse proxy such as nginx. - - Args: - path: File system path to the Unix socket. - - """ - return serve(ws_handler, path=path, unix=True, **kwargs) - - -def remove_path_argument( - ws_handler: Union[ - Callable[[WebSocketServerProtocol], Awaitable[Any]], - Callable[[WebSocketServerProtocol, str], Awaitable[Any]], - ] -) -> Callable[[WebSocketServerProtocol], Awaitable[Any]]: - try: - inspect.signature(ws_handler).bind(None) - except TypeError: - try: - inspect.signature(ws_handler).bind(None, "") - except TypeError: # pragma: no cover - # ws_handler accepts neither one nor two arguments; leave it alone. - pass - else: - # ws_handler accepts two arguments; activate backwards compatibility. - - # Enable deprecation warning and announce deprecation in 11.0. - # warnings.warn("remove second argument of ws_handler", DeprecationWarning) - - async def _ws_handler(websocket: WebSocketServerProtocol) -> Any: - return await cast( - Callable[[WebSocketServerProtocol, str], Awaitable[Any]], - ws_handler, - )(websocket, websocket.path) - - return _ws_handler - - return cast( - Callable[[WebSocketServerProtocol], Awaitable[Any]], - ws_handler, - ) diff --git a/spaces/projecte-aina/aguila-7b/app.py b/spaces/projecte-aina/aguila-7b/app.py deleted file mode 100644 index 3659dedda1484bc4623518474144a881cc883ba8..0000000000000000000000000000000000000000 --- a/spaces/projecte-aina/aguila-7b/app.py +++ /dev/null @@ -1,201 +0,0 @@ -import os -from dotenv import load_dotenv -import gradio as gr -from gradio.components import Textbox, Button, Slider, Checkbox -from AinaTheme import AinaGradioTheme -from sagemaker_endpoint import invoke_endpoint - -load_dotenv() - -MAX_NEW_TOKENS = int(os.environ.get("MAX_NEW_TOKENS", default=100)) -MAX_INPUT_CHARACTERS= int(os.environ.get("MAX_INPUT_CHARACTERS", default=100)) -SHOW_MODEL_PARAMETERS_IN_UI = os.environ.get("SHOW_MODEL_PARAMETERS_IN_UI", default=True) == "True" - - -def submit_input(input_, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature): - if input_.strip() == "": - gr.Warning('Not possible to inference an empty input') - return None - - model_parameters = { - "max_new_tokens": max_new_tokens, - "repetition_penalty": repetition_penalty, - "top_k": top_k, - "top_p": top_p, - "do_sample": do_sample, - "num_beams": num_beams, - "temperature": temperature - } - - output = invoke_endpoint(input_, model_parameters=model_parameters) - - - if output is None: - gr.Warning('Inference endpoint is not available right now. 
Please try again later.') - return output - -def change_interactive(text): - if len(text.strip()) > MAX_INPUT_CHARACTERS: - return gr.update(interactive = True), gr.update(interactive = False) - return gr.update(interactive = True), gr.update(interactive = True) - -def clear(): - return ( - None, - None, - gr.Slider.update(value=100), - gr.Slider.update(value=1.2), - gr.Slider.update(value=50), - gr.Slider.update(value=0.95), - gr.Checkbox.update(value=True), - gr.Slider.update(value=4), - gr.Slider.update(value=0.5), - ) - -def gradio_app(): - with gr.Blocks(**AinaGradioTheme().get_kwargs()) as demo: - with gr.Row(): - with gr.Column(scale=0.1): - gr.Image("aguila_banner.webp", elem_id="aguila-banner", show_label=False, show_download_button = False, show_share_button = False) - with gr.Column(): - gr.Markdown( - """# Ǎguila-7B - - ✨ **[Ǎguila](https://medium.com/@mpamies247/introducing-a%CC%8Cguila-a-new-open-source-llm-for-spanish-and-catalan-ee1ebc70bc79)** is a 7B parameters LLM that has been trained on a mixture of Spanish, Catalan and English data, adding up to a total of 26B tokens. It is a new open-source Large Language Model (LLM), licensed for both research and commercial use. It uses the [Falcon-7b](https://huggingface.co/tiiuae/falcon-7b) model as a starting point, a state-of-the-art English language model that was openly released just a few months ago by the Technology Innovation Institute. - - 🧪 **Intended use**: This is the base, pretrained version of the model, able to complete text and do few- and zero-shot tasks as shown in the examples. A fine-tuned version able to follow instructions will be available soon. - - ⚠️ **Limitations**: The content generated by these models is unsupervised and might be judged as inappropriate or offensive. Please bear this in mind when exploring this resource. - - 👀 **Learn more about Ǎguila:** [HF official model card](https://huggingface.co/projecte-aina/aguila-7b) and [Medium post](https://medium.com/@mpamies247/introducing-a%CC%8Cguila-a-new-open-source-llm-for-spanish-and-catalan-ee1ebc70bc79). - - """ - ) - with gr.Row( equal_height=False): - with gr.Column(variant="panel"): - placeholder_max_token = Textbox( - visible=False, - interactive=False, - value= MAX_INPUT_CHARACTERS - ) - input_ = Textbox( - lines=11, - label="Input", - placeholder="e.g. El mercat del barri és fantàstic hi pots trobar." 
- ) - with gr.Row(variant="panel", equal_height=True): - gr.HTML("""""") - gr.HTML(f""" 0 / {MAX_INPUT_CHARACTERS}""") - - with gr.Row(variant="panel"): - with gr.Accordion("Model parameters", open=False, visible=SHOW_MODEL_PARAMETERS_IN_UI): - max_new_tokens = Slider( - minimum=1, - maximum=200, - step=1, - value=MAX_NEW_TOKENS, - label="Max tokens" - ) - repetition_penalty = Slider( - minimum=0.1, - maximum=10, - step=0.1, - value=1.2, - label="Repetition penalty" - ) - top_k = Slider( - minimum=1, - maximum=100, - step=1, - value=50, - label="Top k" - ) - top_p = Slider( - minimum=0.01, - maximum=0.99, - value=0.95, - label="Top p" - ) - do_sample = Checkbox( - value=True, - label="Do sample" - ) - num_beams = Slider( - minimum=1, - maximum=8, - step=1, - value=4, - label="Beams" - ) - temperature = Slider( - minimum=0, - maximum=1, - value=0.5, - label="Temperature" - ) - with gr.Column(variant="panel"): - output = Textbox( - lines=11, - label="Output", - interactive=False, - show_copy_button=True - ) - with gr.Row(variant="panel"): - clear_btn = Button( - "Clear", - ) - submit_btn = Button( - "Submit", - variant="primary", - ) - - with gr.Row(): - with gr.Column(scale=0.5): - gr.Examples( - label="Short prompts:", - examples=[ - ["""La capital de Suècia"""], - ], - inputs=[input_, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature], - outputs=output, - fn=submit_input, - ) - - gr.Examples( - label="Zero-shot prompts", - examples=[ - ["Tradueix del Castellà al Català la següent frase: \"Eso es pan comido.\" \nTraducció:"], - ], - inputs=[input_, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature], - outputs=output, - fn=submit_input, - ) - gr.Examples( - label="Few-Shot prompts:", - examples=[ - ["""Oració: Els sons melòdics produeixen una sensació de calma i benestar en l'individu. \nParàfrasi: La música és molt relaxant i reconfortant.\n----\nOració: L'animal domèstic mostra una gran alegria i satisfacció. \nParàfrasi: El gos és molt feliç. \n----\nOració: El vehicle es va trencar i vaig haver de contactar amb el servei de remolc perquè el transportés. \nParàfrasi: El cotxe es va trencar i vaig haver de trucar la grua. \n----\nOració: El professor va explicar els conceptes de manera clara i concisa. \nParàfrasi:"""], - ], - inputs=[input_, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature], - outputs=output, - fn=submit_input, - ) - - - - input_.change(fn=change_interactive, inputs=[input_], outputs=[clear_btn, submit_btn]) - - input_.change(fn=None, inputs=[input_], _js=f"""(i) => document.getElementById('countertext').textContent = i.length > {MAX_INPUT_CHARACTERS} && 'Max length {MAX_INPUT_CHARACTERS} characters. ' || '' """) - - input_.change(fn=None, inputs=[input_, placeholder_max_token], _js="""(i, m) => { - document.getElementById('inputlenght').textContent = i.length + ' ' - document.getElementById('inputlenght').style.color = (i.length > m) ? 
"#ef4444" : ""; - }""") - - clear_btn.click(fn=clear, inputs=[], outputs=[input_, output, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature], queue=False) - submit_btn.click(fn=submit_input, inputs=[input_, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, num_beams, temperature], outputs=[output]) - - demo.queue(concurrency_count=1, api_open=False) - demo.launch(show_api=False) - -if __name__ == "__main__": - gradio_app() \ No newline at end of file diff --git a/spaces/pscpeng/ChuanhuChatGPT/run_Windows.bat b/spaces/pscpeng/ChuanhuChatGPT/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/pscpeng/ChuanhuChatGPT/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/pseudolab/SonGPT/app.py b/spaces/pseudolab/SonGPT/app.py deleted file mode 100644 index add0cba95a8383a9d90288c9ace6fa0b1eab95a3..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/SonGPT/app.py +++ /dev/null @@ -1,262 +0,0 @@ -import os -from typing import Any, Sequence -from uuid import UUID - -import openai -import streamlit as st -from dotenv import load_dotenv -from langchain.callbacks.base import BaseCallbackHandler, Callbacks -from langchain.chains import LLMChain -from langchain.chat_models import ChatOpenAI -from langchain.memory import ConversationBufferMemory -from langchain.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - MessagesPlaceholder, - SystemMessagePromptTemplate, -) -from langchain.schema import AIMessage, HumanMessage -from streamlit.delta_generator import DeltaGenerator -from core.graph import Graph -from PIL import Image -import time - -load_dotenv(verbose=True) -openai.api_key = os.environ["OPENAI_API_KEY"] - -# from core.structure.memory_handler import MemoryHandler - - -# https://python.langchain.com/docs/use_cases/chatbots -@st.cache_resource -def load_graph(): - graph = Graph() - graph.load("graph.pkl") - return graph - - -PROMPT = """ - -Imagine you are Son Heung-min, also known as Sonny, a highly respected South Korean professional footballer and a forward for Tottenham Hotspur and the South Korean national team. -You are known for your explosive speed on the pitch, your humble and industrious personality, and your significant achievements, such as being the first Asian to win the Premier League Golden Boot and earning the Cheongnyong Medal. -You speak Korean and English fluently, and you are proud of your Korean heritage. You are native Korean. -You are considered a national symbol of pride in South Korea and a footballing superstar, often compared to the likes of David Beckham. -As this persona, interact with fans and discuss football, your career, and your cultural experiences with humility, professionalism, and a touch of the cheerful spirit that characterizes your approach to life's challenges. - -When acting as Son Heungmin, there are some guidelines you have to follow. -- You must be humble and professional. -- You must be cheerful and positive. -- You must be respectful and polite. -- You must make your response NO LONGER than 4 sentences. -- You must NOT break your character, under any circumstances. -- You must make your response to make conversation keep going. -- Use emoji only when necessary. Do not use emoji excessively. 
-- You must make your response casual, like everyday conversation. Do not use instagram post too much for answering. - -You are going to be provided with the following information: -(1) Son Heung-min actual posts on instagram to know his personality -(2) Son Heung-min's related facts and information for precise answer. -(3) Retrieved context information for the conversation. -(4) Retrieved memory information for the previous conversation history. - -Use the information above to make the most Son Heung-min like response under the guideline. Make the response cheering, inspiring, and interesting for the fans. - - - -- 이번 10월 a매치도 팬분들 덕분에 행복한 하루하루를 보내다가 다시 런던으로 돌아갑니다. 이런 영광과 사랑을 받아서 너무나 감사드리고 저는 다시 소속팀에 돌아가서 즐겁고 강한 모습으로 경기장에서 인사드리겠습니다 조금은 쌀쌀한 날씨에 늦은 시간 경기장까지 와주신 팬분들 또 티브이로 시청해 주신 모든 팬분들께 진심으로 감사드리고 이 응원과 사랑 잊지 않고 받은 만큼 즐거움을 드릴 수 있도록 최선을 다하는 쏘니가 되겠습니다 사랑합니다 ❤️ -- Your support last night was incredible. We felt so much off your energy and atmosphere in the stadium. We made mistakes as a team, put ourselves in a tricky position. But hey, we will learn from these mistakes, we will bounce back, and we will be stronger together. I wake up this morning proud of the boys, proud of you the supporters, and excited to get straight back to work. COYS 🤍 -- 안녕하세요 카타르 월드컵 대한민국 대표팀 손흥민입니다. 이 기회를 통해 많은 응원, 성원, 관심에 감사의 인사를 드리고자 이렇게 글을 쓰고 있습니다. 4년이란 시간 동안 많은 것들을 노력하고 꿈을 향해 앞만 보고 달렸습니다. 매번 좋은 순간만 있진 않았지만 그 아쉬운 순간들이 저희를 더 강하게 만들어 지금의 국가대표팀을 만들었다고 생각합니다. 또 그 순간마다 뒤에서 한결같은 여러분의 응원으로 이겨낼 수 있었습니다! -저희 선수들 보이지 않는 곳에서 정말 많이 노력했고 대한민국 대표팀으로 뛰는 것 그 자체만으로 영광스럽게 생각하며 몸이 부서지도록 뛰었습니다. 저는 저희가 분명 더 높이 올라갈 수 있다고 믿었기에 아쉬움은 있었지만 후회는 절대 없습니다. 저를 포함한 모든 선수와 스태프들 하루도 빠짐없이 매 순간 노력했기에 잊지 못할 월드컵이 만들어질 수 있었다고 생각합니다. -더불어 축구 선수이기 이전에 대한민국 국민의 한 사람으로서, 제가 얼마나 행복한 사람인지 다시 한번 느낄 수 있었습니다. 여러분이 보내주신 사랑에 진심으로 감사드립니다. 앞으로 저 뿐만 아니라 대한민국 축구 대표팀을, 나아가 대한민국을 더욱더 빛나게 하기 위해 최선을 다해 노력하겠습니다. 대한민국 사랑하고 감사합니다❤️🇰🇷 -(1%의 가능성이 정말 크다고 느꼈습니다. 이 자리에 설 수 있게 해주셔서 감사합니다.) -- 저희는 포기하지 않았고 여러분들은 우릴 포기하지 않았습니다 대한민국 사랑합니다!🇰🇷 - - - -Born: 1992-07-08 -Age: 31 -Height: 1.84 m -Weight: 78 kg -Nationality: South Korean -Team: Tottenham Hotspur F.C. 
(2015-) -Position: Forward, Centre forward, Winger -Back number: 7 -Total Goals: 240 - - - - - - - -""" - - -class StreamCallbackHandler(BaseCallbackHandler): - def __init__(self, container: DeltaGenerator, initial_text: str = ""): - self.container = container - self.text = initial_text - self.run_id_ignore_token: UUID | None = None - - def on_llm_start( - self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any - ) -> None: - # Workaround to prevent showing the rephrased question as output - if prompts[0].startswith("Human"): - self.run_id_ignore_token = kwargs.get("run_id") - - def on_llm_new_token(self, token: str, **kwargs: Any) -> None: - if self.run_id_ignore_token == kwargs.get("run_id", False): - return - self.text += token - self.container.markdown(self.text + "▌") - - -class Bot: - def __init__(self) -> None: - self.llm = ChatOpenAI( - model="gpt-4-1106-preview", - temperature=0.7, - streaming=True, - ) - self.prompt = ChatPromptTemplate( - input_variables=["chat_history", "question"], - messages=[ - SystemMessagePromptTemplate.from_template( - PROMPT, - ), - # The `variable_name` here is what must align with memory - MessagesPlaceholder(variable_name="chat_history"), - HumanMessagePromptTemplate.from_template("{message}"), - ], - ) - self.chain = LLMChain( - prompt=self.prompt, - llm=self.llm, - verbose=True, - ) - self.memory_buffer_size = 5 - self.graph = load_graph() - # self.memory_hander = MemoryHandler() - - def get_son_memory( - self, user_message: str - ) -> tuple[list[str], Image.Image, Image.Image]: - retrieved_memories, image1, image2 = self.graph.find_related_memories( - user_message, 5 - ) - return retrieved_memories, image1, image2 - - def get_response( - self, session_messages: list[dict[str, Any]], callbacks: Callbacks = None - ) -> tuple[dict[str, Any], Any]: - print(session_messages) - - session_messages = [ - AIMessage(content=msg["content"]) - if msg["role"] == "assistant" - else HumanMessage(content=msg["content"]) - for msg in session_messages - ] - - context, image1, image2 = self.get_son_memory(session_messages[-1].content) - context: str = "\n" + "\n".join(context) + "\n" - - chain_input = { - "message": session_messages[-1].content, - "chat_history": session_messages[self.memory_buffer_size : -1], - "context": context, - } - - response: dict[str, Any] = self.chain(chain_input, callbacks=callbacks) - return response, (image1, image2) - - -st.set_page_config(layout="wide") -st.title("손흥민 GPT 💬⚽🇰🇷") - -if "openai_model" not in st.session_state: - st.session_state["openai_model"] = "gpt-3.5-turbo" - -if "messages" not in st.session_state: - st.session_state.messages = [] - -if "visibility" not in st.session_state: - st.session_state.visibility = "visible" - st.session_state.disabled = False - -if "lawbot" not in st.session_state: - st.session_state.chatbot = Bot() - -if "user_did_chat" not in st.session_state: - st.session_state.user_did_chat = False - - -col1, col2 = st.columns(2) -prompt = "" -image1 = st.session_state.chatbot.graph.draw() -image2 = None - - -with col1: - prompt = st.text_input( - "Chat Here! 
👇", - "", - label_visibility=st.session_state.visibility, - disabled=st.session_state.disabled, - ) - - for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - if prompt: - st.session_state.messages.append({"role": "user", "content": prompt}) - with st.chat_message("user"): - st.markdown(prompt) - - with st.chat_message("assistant"): - message_placeholder = st.empty() - - # Stream response - stream_callback_handler = StreamCallbackHandler( - container=message_placeholder - ) - response, images = st.session_state.chatbot.get_response( - session_messages=st.session_state.messages, - callbacks=[stream_callback_handler], - ) - st.session_state.user_did_chat = True - - image1, image2 = images - - image1.save("graph_retrieval_1.png") - image2.save("graph_retrieval_2.png") - # Render final response - content = response["text"] - message_placeholder.markdown(content) - - st.session_state.messages.append({"role": "assistant", "content": content}) - -with col2: - st.header("Dynamically Generated Graph") - st.write("Graph will be displayed here based on the chat conversation.") - st.write("[TO BE ADDED]") - - default_image = st.session_state.chatbot.graph.draw() - image1 = Image.open("graph_retrieval_1.png") - image2 = Image.open("graph_retrieval_2.png") - - container = st.empty() - container.image(default_image, caption="Default Image") - - while True: - if st.session_state.user_did_chat: - container.image(image1, caption="Image 1") - time.sleep(1) # Wait for 1 second - container.image(image2, caption="Image 2") - time.sleep(1.5) # Wait for 1 second - st.session_state.user_did_chat = False - container.image(default_image, caption="Default Image") - else: - time.sleep(0.1) diff --git a/spaces/pxiaoer/papers/app.py b/spaces/pxiaoer/papers/app.py deleted file mode 100644 index 0632ee44e499d6ae99b44a379746250c5406d87a..0000000000000000000000000000000000000000 --- a/spaces/pxiaoer/papers/app.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python - -import os - -import gradio as gr - -from papers import PaperList, get_df -from update_scheduler import UpdateScheduler - -DESCRIPTION = "# [每日最新论文](https://huggingface.co/papers)" - -paper_list = PaperList(get_df("papers.csv")) - -if (SPACE_ID := os.getenv("SPACE_ID")) is not None: - CRON_HOUR = os.getenv("CRON_HOUR", "*/4") - CRON_MINUTE = os.getenv("CRON_MINUTE", "0") - scheduler = UpdateScheduler(space_id=SPACE_ID, cron_hour=CRON_HOUR, cron_minute=CRON_MINUTE) - scheduler.start() - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - df = gr.Dataframe( - value=paper_list.df_prettified, - datatype=paper_list.column_datatype, - type="pandas", - interactive=False, - height=1000, - elem_id="table", - column_widths=["10%", "10%", "60%", "10%", "10%"], - wrap=True, - ) - -if __name__ == "__main__": - demo.queue(api_open=False).launch() diff --git a/spaces/qinzhu/moe-tts-tech/commons.py b/spaces/qinzhu/moe-tts-tech/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/moe-tts-tech/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if 
classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def 
generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Ab Ke Baras Full Hd Movie Download 720p Movies.md b/spaces/quidiaMuxgu/Expedit-SAM/Ab Ke Baras Full Hd Movie Download 720p Movies.md deleted file mode 100644 index 49247daf5bc06f49e64228ee86a9d5ee465999d5..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Ab Ke Baras Full Hd Movie Download 720p Movies.md +++ /dev/null @@ -1,100 +0,0 @@ -
        -

        How to Download Ab Ke Baras Full HD Movie in 720p for Free

        - -

        Ab Ke Baras is a 2002 Bollywood romantic thriller film that revolves around the concept of reincarnation and karma. The film stars Amrita Rao and Arya Babbar as Anjali and Karan, who are lovers from their past life and meet again in the present day. The film also features Ashutosh Rana, Shakti Kapoor, and Danny Denzongpa in supporting roles.

        - -

        Ab Ke Baras is a film that will keep you hooked with its engaging story, amazing performances, and melodious songs. If you are a fan of Bollywood movies and want to watch Ab Ke Baras online or download it on your device, then you are in the right place. In this article, we will tell you how to download Ab Ke Baras full HD movie in 720p for free using legal and safe methods.

        -

        Ab Ke Baras full hd movie download 720p movies


        Download File >>> https://geags.com/2uCsAF



        - -

        Why You Should Watch Ab Ke Baras

        - -

Ab Ke Baras is a film that has something for everyone: romance, drama, action, suspense, and comedy. It has a captivating story that will make you believe in love and destiny, amazing performances by the lead actors Amrita Rao and Arya Babbar, who share great chemistry on screen, and beautiful songs composed by Anu Malik that will touch your heart.

        - -

Ab Ke Baras is a film that will make you think about your past and present life and appreciate the power of karma and spirituality. And in 720p, you can enjoy it in crisp HD quality.

        - -

        So what are you waiting for? Watch Ab Ke Baras online or download it on your device and enjoy this romantic thriller that will take you on an unforgettable journey.

        - -

        How to Watch Ab Ke Baras Online for Free

        - -

        One of the easiest ways to watch Ab Ke Baras online for free is to use Hotstar, a popular streaming platform that offers a variety of movies and shows. You can watch Ab Ke Baras in HD quality on Hotstar without any subscription or registration.

        - -

        To watch Ab Ke Baras online for free on Hotstar, follow these simple steps:

        - - - -

        How to Download Ab Ke Baras Full HD Movie in 720p for Free

        - -

If you want to download Ab Ke Baras full HD movie in 720p for free on your device, you would have to use third-party websites that provide movie download links. However, we do not recommend or endorse these websites, as they may contain malware or viruses that can harm your device. Moreover, downloading movies from these websites may be illegal or unethical, as it can violate copyright law.

        - -

Therefore, we suggest you use legal and safe methods to download Ab Ke Baras full HD movie in 720p for free on your device. One of these methods is to use video downloader software or an app that can save videos from various websites. You can use such software or apps to download Ab Ke Baras from Hotstar or other sources.

        -

        - -

To download Ab Ke Baras full HD movie in 720p for free using video downloader software or an app, follow these simple steps:

        - -
          -
• Download and install video downloader software or an app on your device. Some examples are Vidmate, Videoder, and TubeMate.
        • -
        • Open the video downloader software or app and search for Ab Ke Baras movie.
        • -
        • Select the source from where you want to download the movie. For example, Hotstar.
        • -
        • Select the quality of the video you want to download. For example, 720p.
        • -
        • Click on the download button and wait for the movie to be downloaded on your device.
        • -
        - -

        You can also use other methods to download Ab Ke Baras full HD movie in 720p for free on your device, such as using torrent sites or magnet links. However, these methods are also risky and illegal as they may expose you to cyber threats or legal issues.

        - -

        Therefore, we advise you to use legal and safe methods to download Ab Ke Baras full HD movie in 720p for free on your device.

        - -

        Conclusion

        - -

Ab Ke Baras is a 2002 Bollywood romantic thriller film that you should not miss if you love Bollywood movies. The film has romance, drama, action, suspense, and comedy, a captivating story that will make you believe in love and destiny, amazing performances by the lead actors Amrita Rao and Arya Babbar, who share great chemistry on screen, and beautiful songs composed by Anu Malik that will touch your heart.

        - -

You can watch Ab Ke Baras online for free on Hotstar or download it on your device using legal and safe methods. We hope this article helped you learn how to download Ab Ke Baras full HD movie in 720p for free.

        -

        Ab Ke Baras: Songs and Music

        - -

        One of the highlights of Ab Ke Baras is its songs and music composed by Anu Malik. The film has six songs that are sung by various singers like Sonu Nigam, Alka Yagnik, Sunidhi Chauhan, Kunal Ganjawala, and others. The songs are written by Sameer and Anand Bakshi.

        - -

        The songs of Ab Ke Baras are melodious and romantic, and suit the mood and theme of the film. Some of the popular songs of Ab Ke Baras are:

        - -
          -
        • Ab Ke Baras: The title song of the film, sung by Sonu Nigam and Alka Yagnik. It is a duet that expresses the love and longing of Anjali and Karan.
        • -
        • Deewane Aate Jaate: A peppy song sung by Sonu Nigam, Alka Yagnik, and Kunal Ganjawala. It is a song that shows the fun and mischief of Karan and his friends.
        • -
        • Pyar Mohabbat: A romantic song sung by Alka Yagnik and Udit Narayan. It is a song that shows the chemistry and passion of Anjali and Karan.
        • -
        • Tham Ja Tham Ja: A soothing song sung by Sunidhi Chauhan. It is a song that shows the emotional turmoil of Anjali as she tries to cope with her past and present life.
        • -
        - -

        You can listen to the songs of Ab Ke Baras online on various platforms like YouTube, Gaana, Spotify, etc. You can also download the songs of Ab Ke Baras on your device using legal and safe methods.

        - -

        Ab Ke Baras: Reviews and Ratings

        - -

        Ab Ke Baras received mixed reviews from critics and audiences when it was released in 2002. The film was praised for its story, performances, and music, but was criticized for its length, direction, and editing. The film was also compared to other Bollywood films based on reincarnation like Karan Arjun, Kudrat, etc.

        - -

        The film did not perform well at the box office and was declared a flop. However, the film gained a cult following over the years and became popular among the fans of Amrita Rao and Arya Babbar.

        - -

        The film has a rating of 4.1 out of 10 on IMDb, based on 1,034 user ratings. The film has a rating of 3 out of 5 on Bollywood Hungama, based on 6 user ratings.

        - -

        You can read the reviews and ratings of Ab Ke Baras online on various websites like IMDb, Bollywood Hungama, Rotten Tomatoes, etc. You can also write your own review and rating of Ab Ke Baras online on these websites.

        -

        Ab Ke Baras: Trivia and Facts

        - -

        Ab Ke Baras is a film that has some interesting trivia and facts that you may not know. Here are some of them:

        - -
          -
        • Ab Ke Baras is the debut film of Arya Babbar, who is the son of veteran actor Raj Babbar. He was nominated for the Filmfare Award for Best Male Debut for his performance in the film.
        • -
        • Ab Ke Baras is also the second film of Amrita Rao, who made her debut with The Legend of Bhagat Singh in 2002. She was praised for her role as Anjali in the film.
        • -
        • Ab Ke Baras is inspired by the Hollywood film Somewhere in Time (1980), which also deals with reincarnation and love. The film also has some similarities with other Bollywood films like Karan Arjun (1995), Kudrat (1981), etc.
        • -
        • Ab Ke Baras was shot in various locations in India and abroad, such as Mumbai, Delhi, Rajasthan, London, etc. The film also features some historical places like India Gate, Red Fort, Qutub Minar, etc.
        • -
• Ab Ke Baras was released on 10 May 2002, coinciding with Mother's Day. The film is dedicated to the mothers of India who have sacrificed so much for their children.
        • -
        - -

        Ab Ke Baras: Conclusion

        - -

Ab Ke Baras is a 2002 Bollywood romantic thriller film that you should watch if you love Bollywood movies. The film has romance, drama, action, suspense, and comedy, a captivating story that will make you believe in love and destiny, amazing performances by the lead actors Amrita Rao and Arya Babbar, who share great chemistry on screen, and beautiful songs composed by Anu Malik that will touch your heart.

        - -

You can watch Ab Ke Baras online for free on Hotstar or download it on your device using legal and safe methods. We hope this article helped you learn how to download Ab Ke Baras full HD movie in 720p for free.

        - -

        If you liked this article, please share it with your friends and family who love Bollywood movies. Also, let us know your feedback and suggestions in the comments section below.

        - -

        Thank you for reading and happy watching!

        -
        -
        \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mali Kuvar Radmila Petkovic Pdf Download HOT.md b/spaces/quidiaMuxgu/Expedit-SAM/Mali Kuvar Radmila Petkovic Pdf Download HOT.md deleted file mode 100644 index ff32f38ca873f8484cb39bf4c2084115a6f31842..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Mali Kuvar Radmila Petkovic Pdf Download HOT.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Mali Kuvar Radmila Petkovic Pdf Download


        Download Zip >>> https://geags.com/2uCslX



        -
-Download Vladislav Petkovic Dis - sveska.pdf (24 downloads). Related: petkovic radmila mali kuvar.
        -
        -
        -

        diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/models_dml.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/models_dml.py deleted file mode 100644 index 5806e7d919af976aec47cd974373be8dff2d272e..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/models_dml.py +++ /dev/null @@ -1,1122 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer.infer_pack import modules -from lib.infer.infer_pack import attentions -from lib.infer.infer_pack.commons import get_padding -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer.infer_pack.commons import init_weights -import numpy as np -from lib.infer.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def 
__init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, 
modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv.float() - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - 
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = 
F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = 
self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, 
(z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - 
self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def 
forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/vr_network/nets.py b/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/vr_network/nets.py deleted file mode 100644 index 064cad9e4a2a43681c09336d3ce8c3d1149bbd29..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/vr_network/nets.py +++ /dev/null @@ -1,166 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers - -class BaseASPPNet(nn.Module): - - def __init__(self, nn_architecture, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.nn_architecture = nn_architecture - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - if self.nn_architecture == 129605: - self.enc5 = layers.Encoder(ch * 8, ch * 16, 3, 2, 1) - self.aspp = layers.ASPPModule(nn_architecture, ch * 16, ch * 32, dilations) - self.dec5 = layers.Decoder(ch * (16 + 32), ch * 16, 3, 1, 1) - else: - self.aspp = layers.ASPPModule(nn_architecture, ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - if self.nn_architecture == 129605: - h, e5 = self.enc5(h) - h = self.aspp(h) - h = self.dec5(h, e5) - else: - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - -def determine_model_capacity(n_fft_bins, nn_architecture): - - sp_model_arch = [31191, 33966, 129605] - hp_model_arch = [123821, 123812] - hp2_model_arch = [537238, 537227] - - if nn_architecture in sp_model_arch: - model_capacity_data = [ - (2, 16), - (2, 16), - (18, 8, 1, 1, 0), - (8, 16), - (34, 16, 1, 1, 0), - (16, 32), - (32, 2, 1), - (16, 2, 1), - (16, 2, 1), - ] - - if nn_architecture in hp_model_arch: - model_capacity_data = [ - (2, 32), - (2, 32), - (34, 16, 1, 1, 0), - (16, 32), - (66, 32, 1, 1, 0), - (32, 64), - (64, 2, 1), - (32, 2, 1), - (32, 2, 1), - ] - - if nn_architecture in hp2_model_arch: - model_capacity_data = [ - (2, 64), - (2, 64), - (66, 32, 1, 1, 0), - (32, 64), - (130, 64, 1, 1, 0), - (64, 128), - (128, 2, 1), - (64, 2, 1), - (64, 2, 1), - ] - - cascaded = CascadedASPPNet - model = cascaded(n_fft_bins, model_capacity_data, nn_architecture) - - return model - -class CascadedASPPNet(nn.Module): - - def __init__(self, n_fft, model_capacity_data, nn_architecture): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[0]) - self.stg1_high_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[1]) - - self.stg2_bridge = layers.Conv2DBNActiv(*model_capacity_data[2]) - self.stg2_full_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[3]) - - self.stg3_bridge = layers.Conv2DBNActiv(*model_capacity_data[4]) - self.stg3_full_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[5]) - - self.out = nn.Conv2d(*model_capacity_data[6], bias=False) - self.aux1_out = nn.Conv2d(*model_capacity_data[7], bias=False) - self.aux2_out = nn.Conv2d(*model_capacity_data[8], bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x): - mix = x.detach() - x = x.clone() - - x = x[:, :, :self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat([ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]) - ], dim=2) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = 
torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode='replicate') - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode='replicate') - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode='replicate') - return mask * mix, aux1 * mix, aux2 * mix - else: - return mask# * mix - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset:-self.offset] - - return mask \ No newline at end of file diff --git a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/squeezenet.py b/spaces/rachana219/MODT2/trackers/strongsort/deep/models/squeezenet.py deleted file mode 100644 index 83e8dc9fc46b4e76304bf1b681a14ce5b865b993..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/squeezenet.py +++ /dev/null @@ -1,236 +0,0 @@ -""" -Code source: https://github.com/pytorch/vision -""" -from __future__ import division, absolute_import -import torch -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - -__all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512'] - -model_urls = { - 'squeezenet1_0': - 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', - 'squeezenet1_1': - 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', -} - - -class Fire(nn.Module): - - def __init__( - self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes - ): - super(Fire, self).__init__() - self.inplanes = inplanes - self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) - self.squeeze_activation = nn.ReLU(inplace=True) - self.expand1x1 = nn.Conv2d( - squeeze_planes, expand1x1_planes, kernel_size=1 - ) - self.expand1x1_activation = nn.ReLU(inplace=True) - self.expand3x3 = nn.Conv2d( - squeeze_planes, expand3x3_planes, kernel_size=3, padding=1 - ) - self.expand3x3_activation = nn.ReLU(inplace=True) - - def forward(self, x): - x = self.squeeze_activation(self.squeeze(x)) - return torch.cat( - [ - self.expand1x1_activation(self.expand1x1(x)), - self.expand3x3_activation(self.expand3x3(x)) - ], 1 - ) - - -class SqueezeNet(nn.Module): - """SqueezeNet. - - Reference: - Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters - and< 0.5 MB model size. arXiv:1602.07360. - - Public keys: - - ``squeezenet1_0``: SqueezeNet (version=1.0). - - ``squeezenet1_1``: SqueezeNet (version=1.1). - - ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC. 
- """ - - def __init__( - self, - num_classes, - loss, - version=1.0, - fc_dims=None, - dropout_p=None, - **kwargs - ): - super(SqueezeNet, self).__init__() - self.loss = loss - self.feature_dim = 512 - - if version not in [1.0, 1.1]: - raise ValueError( - 'Unsupported SqueezeNet version {version}:' - '1.0 or 1.1 expected'.format(version=version) - ) - - if version == 1.0: - self.features = nn.Sequential( - nn.Conv2d(3, 96, kernel_size=7, stride=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(96, 16, 64, 64), - Fire(128, 16, 64, 64), - Fire(128, 32, 128, 128), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(256, 32, 128, 128), - Fire(256, 48, 192, 192), - Fire(384, 48, 192, 192), - Fire(384, 64, 256, 256), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(512, 64, 256, 256), - ) - else: - self.features = nn.Sequential( - nn.Conv2d(3, 64, kernel_size=3, stride=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(64, 16, 64, 64), - Fire(128, 16, 64, 64), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(128, 32, 128, 128), - Fire(256, 32, 128, 128), - nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), - Fire(256, 48, 192, 192), - Fire(384, 48, 192, 192), - Fire(384, 64, 256, 256), - Fire(512, 64, 256, 256), - ) - - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p) - self.classifier = nn.Linear(self.feature_dim, num_classes) - - self._init_params() - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - """Constructs fully connected layer - - Args: - fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed - input_dim (int): input dimension - dropout_p (float): dropout probability, if None, dropout is unused - """ - if fc_dims is None: - self.feature_dim = input_dim - return None - - assert isinstance( - fc_dims, (list, tuple) - ), 'fc_dims must be either list or tuple, but got {}'.format( - type(fc_dims) - ) - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU(inplace=True)) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def _init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - f = self.features(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if self.fc is not None: - v = self.fc(v) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError('Unsupported loss: {}'.format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url, map_location=None) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SqueezeNet( - num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['squeezenet1_0']) - return model - - -def squeezenet1_0_fc512( - num_classes, loss='softmax', pretrained=True, **kwargs -): - model = SqueezeNet( - num_classes, - loss, - version=1.0, - fc_dims=[512], - dropout_p=None, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['squeezenet1_0']) - return model - - -def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SqueezeNet( - num_classes, loss, version=1.1, fc_dims=None, dropout_p=None, **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['squeezenet1_1']) - return model diff --git a/spaces/radames/candle-segment-anything-wasm/README.md b/spaces/radames/candle-segment-anything-wasm/README.md deleted file mode 100644 index 58180147044a3886dc27d6514d18affde053d56c..0000000000000000000000000000000000000000 --- a/spaces/radames/candle-segment-anything-wasm/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Candle Segment Anything Wasm -emoji: 🕯️🎭 -colorFrom: gray -colorTo: pink -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Ana Express Cutting Plotter AE 70 Driver.zip Les Meilleures Astuces et Conseils pour Optimiser son Utilisation.md b/spaces/raedeXanto/academic-chatgpt-beta/Ana Express Cutting Plotter AE 70 Driver.zip Les Meilleures Astuces et Conseils pour Optimiser son Utilisation.md deleted file mode 100644 index 910e3a1aa10de4420fceb12689e2e74ab99b5116..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Ana Express Cutting Plotter AE 70 Driver.zip Les Meilleures Astuces et Conseils pour Optimiser son Utilisation.md +++ /dev/null @@ -1,160 +0,0 @@ - -

        Ana Express Cutting Plotter AE 70 Driver.zip: How to Download and Install It

        -

        Introduction

        -

        If you are looking for a reliable and versatile cutting plotter, you might want to consider the Ana Express Cutting Plotter AE 70. This device can cut various materials, such as vinyl, paper, cardstock, fabric, and more, with high precision and speed. It can also connect to your computer via USB or serial port, and work with different software programs, such as CorelDraw, DesignArt, and SmartCut Pro.

        -

        However, before you can use your Ana Express Cutting Plotter AE 70, you need to install its driver on your computer. The driver is a software program that allows your computer to communicate with your cutting plotter and control its functions. Without the driver, your cutting plotter will not work properly or at all.

        -

    



        -

        In this article, we will show you how to download and install the Ana Express Cutting Plotter AE 70 driver.zip file on your Windows or Mac computer. We will also provide some troubleshooting tips in case you encounter any problems with the driver.

        -

        What is Ana Express Cutting Plotter AE 70?

        -

        Ana Express Cutting Plotter AE 70 is a cutting plotter manufactured by Anagraph Inc., a company that specializes in producing high-quality sign making equipment. The cutting plotter has a cutting width of 30 inches and a cutting speed of up to 24 inches per second. It also has a digital LCD display, a memory buffer of 4 MB, and an adjustable blade pressure of up to 500 grams.

        -

        Ana Express Cutting Plotter AE 70 can cut various materials, such as vinyl, paper, cardstock, fabric, and more, with high precision and speed. It can also connect to your computer via USB or serial port, and work with different software programs, such as CorelDraw, DesignArt, and SmartCut Pro.

        -

        Why do you need Ana Express Cutting Plotter AE 70 driver.zip?

        -

        Ana Express Cutting Plotter AE 70 driver.zip is a compressed file that contains the driver for your cutting plotter. The driver is a software program that allows your computer to communicate with your cutting plotter and control its functions. Without the driver, your cutting plotter will not work properly or at all.

        -

        You need to download and install the Ana Express Cutting Plotter AE 70 driver.zip file on your computer before you can use your cutting plotter. The driver will enable your computer to recognize your cutting plotter and send commands to it. The driver will also ensure that your cutting plotter performs optimally and safely.

        -

        How to download Ana Express Cutting Plotter AE 70 driver.zip?

        -

        To download the Ana Express Cutting Plotter AE 70 driver.zip file, you need to visit the official website of Anagraph Inc., which is https://www.anagraph.com/. On the website, you need to go to the Support section and look for the Downloads page. On the Downloads page, you need to find the Ana Express/Elite Plotter Driver for DesignArt program and click on it. This will start the download process of the Ana Express Cutting Plotter AE 70 driver.zip file.

        -

        ana express cutting plotter ae 70 software download
        -ana express cutting plotter ae 70 manual pdf
        -ana express cutting plotter ae 70 installation guide
        -ana express cutting plotter ae 70 troubleshooting tips
        -ana express cutting plotter ae 70 compatible blades
        -ana express cutting plotter ae 70 spare parts
        -ana express cutting plotter ae 70 review and ratings
        -ana express cutting plotter ae 70 price and warranty
        -ana express cutting plotter ae 70 user forum and support
        -ana express cutting plotter ae 70 video tutorial and demo
        -how to use ana express cutting plotter ae 70 for vinyl
        -how to connect ana express cutting plotter ae 70 to computer
        -how to update ana express cutting plotter ae 70 firmware
        -how to calibrate ana express cutting plotter ae 70 settings
        -how to adjust ana express cutting plotter ae 70 pressure and speed
        -how to fix ana express cutting plotter ae 70 error codes
        -how to replace ana express cutting plotter ae 70 blade holder
        -how to clean ana express cutting plotter ae 70 rollers and carriage
        -how to reset ana express cutting plotter ae 70 factory defaults
        -how to test ana express cutting plotter ae 70 cut quality and accuracy
        -best vinyl types for ana express cutting plotter ae 70 performance
        -best software programs for ana express cutting plotter ae 70 design and output
        -best accessories and tools for ana express cutting plotter ae 70 maintenance and operation
        -best practices and tips for ana express cutting plotter ae 70 usage and safety
        -best online courses and resources for ana express cutting plotter ae 70 training and learning
        -comparison of ana express cutting plotter ae 70 vs other models and brands
        -pros and cons of ana express cutting plotter ae 70 features and functions
        -benefits and drawbacks of ana express cutting plotter ae 70 size and weight
        -advantages and disadvantages of ana express cutting plotter ae 70 noise and speed levels
        -strengths and weaknesses of ana express cutting plotter ae 70 quality and durability
        -where to buy ana express cutting plotter ae 70 online or offline
        -where to find ana express cutting plotter ae 70 coupons and discounts
        -where to get ana express cutting plotter ae 70 free shipping and delivery
        -where to read ana express cutting plotter ae 70 testimonials and feedbacks
        -where to watch ana express cutting plotter ae 70 live streams and webinars
        -what are the common problems and issues with ana express cutting plotter ae 70 users face?
        -what are the solutions and fixes for ana express cutting plotter ae 70 problems and issues?
        -what are the latest updates and news about ana express cutting plotter ae 70 developments and innovations?
        -what are the frequently asked questions and answers about ana express cutting plotter ae 70 features and functions?
        -what are the best alternatives and substitutes for ana express cutting plotter ae 70 products and services?

        -

        Alternatively, you can also use one of the following links to download the Ana Express Cutting Plotter AE 70 driver.zip file from other sources:

        - -

        Please note that these links are not affiliated with Anagraph Inc., and we cannot guarantee their reliability or safety. We recommend that you scan the downloaded file with an antivirus program before opening it.

        -

        Installation guide

        -

        How to unzip Ana Express Cutting Plotter AE 70 driver.zip?

        -

        After downloading the Ana Express Cutting Plotter AE 70 driver.zip file, you need to unzip it before installing it. To unzip the file, you need a program that can extract compressed files, such as WinZip, WinRAR, or 7-Zip. You can download one of these programs from their official websites:

        - -

            After installing one of these programs, locate the Ana Express Cutting Plotter AE 70 driver.zip file on your computer, right-click on it, and select Extract All (or Extract Here / Extract Files, depending on the program). This will create a new folder with the same name as the zip file that contains the extracted files.
    
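            If you would rather not install a separate archiving tool at all, Python's standard-library zipfile module can do the extraction. The snippet below is only a minimal sketch; the archive and destination paths are placeholders that you would adjust to wherever you saved the download.

            import zipfile
            from pathlib import Path

            # Placeholder paths - adjust them to wherever you saved the download.
            archive = Path.home() / "Downloads" / "ana express cutting plotter ae 70 driver.zip"
            destination = Path.home() / "Downloads" / "AE70_driver"

            with zipfile.ZipFile(archive) as zf:
                zf.extractall(destination)  # unpack every file in the archive into the destination folder
                print(f"Extracted {len(zf.namelist())} files to {destination}")
    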

        -

        How to install Ana Express Cutting Plotter AE 70 driver on Windows?

        -

        To install the Ana Express Cutting Plotter AE 70 driver on Windows, you need to follow these steps:

        -
          -
            1. Open the folder that contains the extracted files from the zip file.
            2. Double-click on the file named Setup.exe or Install.exe (or something similar).
            3. Follow the instructions on the screen to complete the installation process.
            4. Restart your computer if prompted.
            5. Connect your cutting plotter to your computer via USB or serial port.
            6. Turn on your cutting plotter and wait for Windows to detect it.
    

          How to install Ana Express Cutting Plotter AE 70 driver on Mac?

          -

          To install the Ana Express Cutting Plotter AE 70 driver on Mac, you need to follow these steps:

          -
            -
              1. Open the folder that contains the extracted files from the zip file.
              2. Double-click on the file named ANAEXPRESS.dmg (or something similar).
              3. Drag and drop the ANAEXPRESS icon to the Applications folder.
              4. Open the Applications folder and double-click on the ANAEXPRESS icon.
              5. Follow the instructions on the screen to complete the installation process.
              6. Restart your computer if prompted.
              7. Connect your cutting plotter to your computer via USB or serial port.
              8. Turn on your cutting plotter and wait for Mac to detect it.
              9. If Mac does not detect your cutting plotter automatically, go to System Preferences > Printers & Scanners > Add > select ANA EXPRESS ELITE CUTTER (AE-60E) from the Default list > Add > OK.
    
          -

          Troubleshooting tips

          -

          What to do if Ana Express Cutting Plotter AE 70 driver.zip is corrupted or missing?

          -

              If you see an error message saying that Ana Express Cutting Plotter AE 70 driver.zip is corrupted or missing, try the following (a short integrity-check script is shown after this list):
    

          -
            -
          • Delete the zip file from your computer and download it again from a reliable source.
          • -
          • Scan the zip file with an antivirus program before opening it.
          • -
          • Use a different program to unzip the file, such as WinZip, WinRAR, or 7-Zip.
          • -
          • Make sure you have enough disk space and memory on your computer to unzip and install the file.
          • -
          -
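              Before re-downloading, you can also verify whether the copy of the archive you already have is actually corrupted. The sketch below uses Python's built-in zipfile module to run a CRC check over every file in the archive; the path is a placeholder you would change to your own download location.

              import zipfile

              # Placeholder path - point this at the copy of the driver archive you downloaded.
              archive = r"C:\Users\you\Downloads\ana express cutting plotter ae 70 driver.zip"

              try:
                  with zipfile.ZipFile(archive) as zf:
                      bad_member = zf.testzip()  # CRC-checks every member; returns the first bad name, or None
                  if bad_member is None:
                      print("Archive passed the CRC check - the zip file itself looks intact.")
                  else:
                      print(f"Corrupted member found: {bad_member} - download the zip file again.")
              except zipfile.BadZipFile:
                  print("Not a valid zip archive - download it again from a trusted source.")
    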

          What to do if Ana Express Cutting Plotter AE 70 driver is not working properly?

          -

          If you encounter any problems with the Ana Express Cutting Plotter AE 70 driver, such as poor cutting quality, inaccurate alignment, or error messages, you need to do the following:

          -
            -
          • Check if your cutting plotter is connected properly to your computer via USB or serial port.
          • -
          • Check if your cutting plotter is turned on and has enough power supply.
          • -
          • Check if your cutting plotter has enough blade pressure and speed settings for the material you are cutting.
          • -
          • Check if your cutting plotter has enough memory buffer and is not overloaded with commands.
          • -
          • Check if your cutting software is compatible with your cutting plotter and has the correct settings for it.
          • -
          • Check if your computer meets the minimum system requirements for the driver and has the latest updates installed.
          • -
          -

          How to update Ana Express Cutting Plotter AE 70 driver?

          -

          To update the Ana Express Cutting Plotter AE 70 driver, you need to do the following:

          -
            -
          • Visit the official website of Anagraph Inc., which is https://www.anagraph.com/, and look for any new versions of the driver available for download.
          • -
          • If there is a new version of the driver available, download it and follow the installation instructions provided by Anagraph Inc.
          • -
          • If there is no new version of the driver available, contact Anagraph Inc. customer support and ask for assistance.
          • -
          -

          Conclusion

          -

          Summary of the main points

          -

          In this article, we have shown you how to download and install the Ana Express Cutting Plotter AE 70 driver.zip file on your Windows or Mac computer. We have also provided some troubleshooting tips in case you encounter any problems with the driver. We hope that this article has been helpful and informative for you.

          -

          Call to action

          -

          If you are interested in purchasing an Ana Express Cutting Plotter AE 70 or any other cutting plotter from Anagraph Inc., please visit their website at https://www.anagraph.com/. You can also contact them by phone at (800) 527-7778 or by email at sales@anagraph.com. They will be happy to assist you with any questions or inquiries you may have.

          -

          Frequently Asked Questions

          -

          What are the benefits of using an Ana Express Cutting Plotter AE 70?

          -

          Ana Express Cutting Plotter AE 70 is a cutting plotter that offers many benefits, such as:

          -
            -
          • It can cut various materials, such as vinyl, paper, cardstock, fabric, and more, with high precision and speed.
          • -
          • It can connect to your computer via USB or serial port, and work with different software programs, such as CorelDraw, DesignArt, and SmartCut Pro.
          • -
          • It has a digital LCD display, a memory buffer of 4 MB, and an adjustable blade pressure of up to 500 grams.
          • -
          • It is easy to use and maintain, and comes with a one-year warranty from Anagraph Inc.
          • -
          -

          How much does an Ana Express Cutting Plotter AE 70 cost?

          -

          Ana Express Cutting Plotter AE 70 costs around $1,500 USD. However, this price may vary depending on where you buy it from and what accessories you choose to include. You can check the current price and availability of Ana Express Cutting Plotter AE 70 on Anagraph Inc.'s website at https://www.anagraph.com/.

          -

          What are some alternative cutting plotters to Ana Express Cutting Plotter AE 70?

          -

          If you are looking for some alternative cutting plotters to Ana Express Cutting Plotter AE 70, you might want to consider these options:

          -
            -
          • Graphtec CE6000-60 Plus: This cutting plotter has a cutting width of 24 inches and a cutting speed of up to 35 inches per second. It also has a digital servo motor, a tangential emulation mode, and an automatic registration mark sensor. It costs around $1,800 USD.
          • -
          • Roland GS-24: This cutting plotter has a cutting width of 23 inches and a cutting speed of up to 20 inches per second. It also has a digital servo motor, an optical registration system, and an overlap cutting function. It costs around $1,700 USD.
          • -
          • Cricut Maker: This cutting plotter has a cutting width of 12 inches and a cutting speed of up to 10 inches per second. It also has a rotary blade, a knife blade, and an adaptive tool system. It costs around $400 USD.
          • -
          -

          How can I learn more about using an Ana Express Cutting Plotter AE 70?

          -

          If you want to learn more about using an Ana Express Cutting Plotter AE 70, you can do the following:

          -
            -
          • Read the user manual that comes with your cutting plotter or download it from Anagraph Inc.'s website at https://www.anagraph.com/.
          • -
          • Watch some video tutorials on YouTube that show how to use an Ana Express Cutting Plotter AE 70 or similar devices.
          • -
          • Contact Anagraph Inc. customer support by phone at (800) 527-7778 or by email at support@anagraph.com. They will be happy to answer any questions or provide any assistance you may need.
          • -
          -

          Where can I find some reviews of Ana Express Cutting Plotter AE 70?

          -

          If you want to find some reviews of Ana Express Cutting Plotter AE 70 from other users who have bought or used it, you can do the following:

          -
            -
          • Browse some online forums or blogs that discuss sign making equipment or related topics. You might find some posts or comments from people who have shared their opinions or experiences with Ana Express Cutting Plotter AE 70 or similar devices.
          • -
          • Visit some online marketplaces or platforms that sell sign making equipment or related products. You might find some ratings or feedback from customers who have purchased or used Ana Express Cutting Plotter AE 70 or similar devices.
          • -
          • Contact some local sign making shops or businesses that use sign making equipment or related services. You might be able to see some samples of their work done with Ana Express Cutting Plotter AE 70 or similar devices. You might also be able to ask them some questions about their satisfaction or dissatisfaction with Ana Express Cutting Plotter AE 70 or similar devices.
          • -
          -

          0a6ba089eb
          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/AutoCAD Plant 3D 2017 64bit Activation Code Zip File NEW!.md b/spaces/raedeXanto/academic-chatgpt-beta/AutoCAD Plant 3D 2017 64bit Activation Code Zip File NEW!.md deleted file mode 100644 index 01f065489a678f00ce0d3dc1509b19d95656b8a1..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/AutoCAD Plant 3D 2017 64bit Activation Code Zip File NEW!.md +++ /dev/null @@ -1,150 +0,0 @@ -
          -

          How to Activate AutoCAD Plant 3D 2017 64bit with a Zip File

          -

          If you are a plant designer, engineer, or manager, you may need a powerful and industry-specific toolset for creating and editing P&IDs, 3D models, and piping isometrics. That's where AutoCAD Plant 3D comes in handy. In this article, you will learn what AutoCAD Plant 3D is, why you need it, how to get an activation code for it, how to download and install it from a zip file, and how to troubleshoot common issues with it. Let's get started!

          -

          What is AutoCAD Plant 3D 2017 and why do you need it?

          -

          AutoCAD Plant 3D is a software application that is part of the AutoCAD family of products. It is designed for plant design professionals who need to create and edit P&IDs (piping and instrumentation diagrams), 3D models, and piping isometrics with an industry-specific toolset. With AutoCAD Plant 3D, you can:

          -

    



          -
            -
          • Collaborate securely in a cloud-based common data environment
          • -
          • Speed up and automate P&ID drafting and 3D modeling with in-context commands
          • -
          • Automatically create piping isometric drawings directly from the 3D model
          • -
          • Use industry-standard symbol libraries and data validation tools
          • -
          • Integrate with other Autodesk software such as Revit, Navisworks, BIM 360, etc.
          • -
          -

          AutoCAD Plant 3D can help you improve your plant design workflow, accuracy, efficiency, and productivity. It can also help you comply with industry standards and regulations, reduce errors and rework, and enhance collaboration and communication among project teams.

          -

          Features and benefits of AutoCAD Plant 3D 2017

          -

          The 2017 version of AutoCAD Plant 3D introduced some new features and enhancements that can make your plant design process even better. Some of the key features are:

          -
            -
          • P&ID Painter: You can display P&ID components in a color that matches pipe line group property values. For example, you can paint by size, service, spec, etc. You can also create custom paint styles in Project Setup.
          • -
          • Multi-port Valves: You can use multi-port (hygienic multi-level) valves that are commonly used by the food, beverage, and pharmaceutical industries. Valves and compatible piping are available from content packs in the Autodesk App Store.
          • -
          • Pulled Pipe Bends: You can create pipes and bends that are connected with welds instead of fittings. This can reduce the number of components in your model and simplify your fabrication process.
          • -
          • Isometric Reference Dimensions: You can document objects in the 3D model that are not part of the pipe line in Iso drawings. For example, you can show the dimensions of a valve handle, a flange, or a support bracket.
          • -
          • Isometric Revision Clouds: You can highlight changes in Iso drawings with revision clouds. You can also customize the revision cloud settings in Project Setup.
          • -
          • Isometric Annotations: You can add annotations to Iso drawings such as notes, labels, tags, dimensions, etc. You can also customize the annotation settings in Project Setup.
          • -
          • Isometric Off-Page Connectors: You can use off-page connectors to link Iso drawings that are split across multiple sheets. You can also customize the off-page connector settings in Project Setup.
          • -
          • Isometric Bill of Materials (BOM): You can create BOM tables for Iso drawings that list the components and materials used in the pipe line. You can also customize the BOM settings in Project Setup.
          • -
          -

          These are just some of the features and benefits of AutoCAD Plant 3D 2017. For more information, you can visit the official website or read the release notes.

          -

          System requirements and compatibility of AutoCAD Plant 3D 2017

          -

          Before you download and install AutoCAD Plant 3D 2017, you need to make sure that your computer meets the minimum system requirements and that your software is compatible with it. Here are the system requirements and compatibility details for AutoCAD Plant 3D 2017:

            Operating System: Windows 10 (64-bit), Windows 8.1 (64-bit), or Windows 7 SP1 (64-bit)
            Memory (RAM): 4 GB (8 GB recommended)
            Disk Space: 8 GB (10 GB recommended)
            Processor: 1 GHz or faster 64-bit processor (Intel or AMD)
            Display Resolution: 1360 x 768 (1920 x 1080 recommended) with True Color
            Display Card: Windows display adapter capable of 1360 x 768 with True Color and DirectX 9; a DirectX 11 compliant card is recommended
            .NET Framework: Version 4.6
            -
    

          In addition, you need to have an internet connection for online activation, access to cloud services, and downloading updates. You also need to have an Autodesk account for online activation and access to cloud services. You can create an Autodesk account for free here.

          -

          AutoCAD Plant 3D 2017 is compatible with other Autodesk software such as Revit, Navisworks, BIM 360, etc. However, you need to make sure that you have the same or compatible versions of these software installed on your computer. For example, AutoCAD Plant 3D 2017 is compatible with Revit 2017, but not with Revit 2018 or Revit 2016. You can check the compatibility matrix for AutoCAD Plant 3D here.

          -

          How to get an activation code for AutoCAD Plant 3D 2017

          -

          To use AutoCAD Plant 3D 2017, you need to have a valid license and an activation code. A license is a legal agreement that grants you the right to use the software for a specific period of time and under certain conditions. An activation code is a unique alphanumeric code that verifies your license and activates your software.

          -

          -

          You can get a license and an activation code for AutoCAD Plant 3D 2017 in two ways: online or offline. Online activation is the easiest and fastest way to activate your software. Offline activation is an alternative method that you can use if you don't have an internet connection or if you encounter any problems with online activation.

          -

          Online activation with Autodesk account

          -

          To activate your software online, you need to have an Autodesk account and an internet connection. Here are the steps to activate your software online:

          -
            -
          1. Sign in to your Autodesk account: Launch AutoCAD Plant 3D 2017 and click on the Sign In button on the top right corner of the screen. Enter your email address and password and click on Sign In. If you don't have an Autodesk account, click on Create Account and follow the instructions to create one.
          2. -
          3. Select your license type: After signing in, you will see a dialog box that asks you to choose your license type. You can choose from three options: Single-User, Multi-User, or Network. A single-user license is for individual users who want to use the software on one or more computers. A multi-user license is for multiple users who want to share the software on a network. A network license is for organizations that want to manage the software licenses on a server. Choose the option that matches your license and click on Next.
          4. -
          5. Enter your serial number and product key: After selecting your license type, you will see a dialog box that asks you to enter your serial number and product key. A serial number is a 12-digit code that identifies your product and license type. A product key is a 5-digit code that identifies the specific product you are activating. You can find these codes in your Autodesk account, in your order confirmation email, or on the product packaging. Enter these codes and click on Next.
          6. -
          7. Verify your activation: After entering your serial number and product key, you will see a dialog box that confirms your activation. You can also check your activation status in your Autodesk account or in the About dialog box of the software. Click on Finish to close the dialog box and start using your software.
          8. -
          -

          Congratulations! You have successfully activated your software online. You can now enjoy all the features and benefits of AutoCAD Plant 3D 2017.

          -

          Offline activation with request code and manual activation

          -

          If you don't have an internet connection or if you encounter any problems with online activation, you can use the offline activation method. This method involves generating a request code from the software, entering it in your Autodesk account, and getting a manual activation code. Here are the steps to activate your software offline:

          -
            -
          1. Generate a request code: Launch AutoCAD Plant 3D 2017 and click on the Enter a Serial Number link on the top right corner of the screen. Enter your serial number and product key and click on Next. Select I have an activation code from Autodesk and click on Next. You will see a dialog box that shows a request code. This is a 16-digit code that is unique to your computer and software. Copy this code or write it down.
          2. -
          3. Enter the request code in your Autodesk account: Go to https://manage.autodesk.com/activate and sign in to your Autodesk account. If you don't have an Autodesk account, click on Create Account and follow the instructions to create one. After signing in, paste or type the request code in the Request Code field and click on Generate Activation Code. You will see a dialog box that shows a manual activation code. This is a 16-digit code that is generated based on your request code. Copy this code or write it down.
          4. -
          5. Enter the manual activation code in the software: Go back to AutoCAD Plant 3D 2017 and paste or type the manual activation code in the I have an activation code from Autodesk field. Click on Next. You will see a dialog box that confirms your activation. Click on Finish to close the dialog box and start using your software.
          6. -
          -

          Congratulations! You have successfully activated your software offline. You can now enjoy all the features and benefits of AutoCAD Plant 3D 2017.

          -

          How to download and install AutoCAD Plant 3D 2017 64bit zip file

          -

          To use AutoCAD Plant 3D 2017, you need to download and install it from a zip file. A zip file is a compressed file that contains one or more files or folders. It reduces the file size and makes it easier to download and transfer. Here are the steps to download and install AutoCAD Plant 3D 2017 64bit zip file:

          -

          Download options and sources for AutoCAD Plant 3D 2017 zip file

          -

          You can download AutoCAD Plant 3D 2017 zip file from different sources depending on your preference and availability. Some of the common sources are:

          -
            -
            • Your Autodesk account or Autodesk Media account: If you have purchased a license, you can download the software from the Products & Services section of your Autodesk account. The Autodesk Media program is intended for journalists and press members who want to review or promote Autodesk products. You can access your Autodesk Media account here and sign in with your email and password. You can then go to the Media Downloads section, find AutoCAD Plant 3D 2017 in the list of available software, click on the Download button, and choose the Browser Download option. This will download a zip file to your computer.
    
          • -
          • Other third-party sources: These are sources for downloading AutoCAD Plant 3D 2017 zip file from websites or platforms that are not affiliated with Autodesk. These sources may offer free or discounted downloads, but they may also pose risks such as viruses, malware, spyware, or illegal copies. We do not recommend using these sources and we are not responsible for any consequences that may arise from using them. Use them at your own risk and discretion.
          • -
          -

          Once you have downloaded the zip file from your preferred source, you need to save it to a location on your computer that you can easily access. For example, you can save it to your desktop, downloads folder, or a USB drive.

          -

          Installation steps and tips for AutoCAD Plant 3D 2017 zip file

          -

          After downloading the zip file, you need to install it on your computer. Here are the steps and tips for installing AutoCAD Plant 3D 2017 zip file:

          -
            -
          1. Extract the zip file: Right-click on the zip file and select Extract All. Choose a destination folder where you want to extract the files and click on Extract. This will create a folder with the same name as the zip file that contains the installation files.
          2. -
          3. Run the setup.exe file: Open the folder that contains the installation files and double-click on the setup.exe file. This will launch the installation wizard that will guide you through the installation process.
          4. -
          5. Select your language and country: In the first screen of the installation wizard, you will see a drop-down menu that lets you choose your language and country. Select the option that matches your preference and click on Next.
          6. -
          7. Select your installation type: In the next screen of the installation wizard, you will see two options for your installation type: Trial or Licensed. A trial installation allows you to use the software for free for a limited period of time (usually 30 days). A licensed installation requires you to enter your serial number and product key to activate your software. Choose the option that matches your license type and click on Next.
          8. -
          9. Select your installation options: In the next screen of the installation wizard, you will see a list of installation options that include: Select Products, Select Installation Location, Select Configuration File, and Select Content Packs. You can click on each option to customize your installation according to your needs. For example, you can choose which products to install, where to install them, which configuration file to use, and which content packs to download. You can also use the default settings if you are not sure what to change. Click on Next after making your selections.
          10. -
          11. Review and accept the license agreement: In the next screen of the installation wizard, you will see the license agreement for AutoCAD Plant 3D 2017. You need to read and accept the terms and conditions of the agreement before you can proceed with the installation. You can also print or save a copy of the agreement for your reference. Click on the I Accept button after reading and accepting the agreement.
          12. -
          13. Start the installation: In the next screen of the installation wizard, you will see a summary of your installation settings and options. You can review them and make any changes if needed. You can also click on the Install Help button to access the online help for more information. Click on the Install button to start the installation.
          14. -
          15. Wait for the installation to complete: The installation process may take several minutes depending on your computer speed and internet connection. You will see a progress bar that shows the status and percentage of the installation. You can also click on the Show Details button to see more information about the installation. Do not close or interrupt the installation wizard until the installation is complete.
          16. -
          17. Finish the installation: After the installation is complete, you will see a screen that confirms that AutoCAD Plant 3D 2017 has been successfully installed on your computer. You can click on the Finish button to close the installation wizard and launch AutoCAD Plant 3D 2017.
          18. -
          -

          Congratulations! You have successfully downloaded and installed AutoCAD Plant 3D 2017 64bit zip file. You can now start using your software and enjoy its features and benefits.

          -

          How to troubleshoot common issues with AutoCAD Plant 3D 2017 activation and installation

          -

          Sometimes, you may encounter some issues or errors with AutoCAD Plant 3D 2017 activation and installation. These issues may be caused by various factors such as incompatible software, corrupted files, insufficient permissions, network problems, etc. Here are some tips and solutions for troubleshooting common issues with AutoCAD Plant 3D 2017 activation and installation:

          -

          Error messages and solutions for AutoCAD Plant 3D 2017 activation

          -

          If you see any error messages when you try to activate your software, you can try these solutions:

          -
            -
          • The serial number you entered is not valid: This means that you have entered an incorrect or invalid serial number for your software. Make sure that you have entered the correct serial number that matches your product and license type. You can find your serial number in your Autodesk account, in your order confirmation email, or on the product packaging. If you still see this error message, contact Autodesk support for assistance.
          • -
          • The product key you entered is not valid: This means that you have entered an incorrect or invalid product key for your software. Make sure that you have entered the correct product key that matches your product and license type. You can find your product key in your Autodesk account, in your order confirmation email, or on the product packaging. If you still see this error message, contact Autodesk support for assistance.
          • -
          • The activation code you entered is not valid: This means that you have entered an incorrect or invalid activation code for your software. Make sure that you have entered the correct activation code that matches your request code and license type. You can get your activation code from your Autodesk account or from the offline activation method. If you still see this error message, contact Autodesk support for assistance.
          • -
          • The license server is not available: This means that you have a network license and the license server is not accessible or responsive. Make sure that you have a stable and secure internet connection and that the license server is online and working properly. You can also check the status of the license server in your Autodesk account or in the License Manager tool. If you still see this error message, contact your network administrator or Autodesk support for assistance.
          • -
          • The license has expired or is invalid: This means that your license has reached its end date or has been deactivated or revoked. Make sure that you have a valid and active license for your software and that you have not violated any terms or conditions of the license agreement. You can check your license status and expiration date in your Autodesk account or in the About dialog box of the software. If you still see this error message, contact Autodesk support for assistance.
          • -
          -

          These are some of the common error messages and solutions for AutoCAD Plant 3D 2017 activation. For more information, you can visit the official help page or contact Autodesk support.

          -

          Tips and tricks for optimizing AutoCAD Plant 3D 2017 performance and functionality

          -

          If you want to improve your AutoCAD Plant 3D 2017 experience, you can try these tips and tricks:

          -
            -
          • Update your software regularly: Updating your software can fix bugs, improve stability, enhance security, and add new features and enhancements. You can update your software from your Autodesk account, from the Autodesk Desktop App, or from the Application Menu of the software.
          • -
          • Use keyboard shortcuts: Keyboard shortcuts can save you time and effort by allowing you to perform common commands and actions with a few keystrokes. You can find a list of keyboard shortcuts for AutoCAD Plant 3D 2017 here. You can also customize your own keyboard shortcuts in the CUI Editor.
          • -
          • Use templates and styles: Templates and styles can help you standardize and streamline your plant design process by applying predefined settings and formats to your projects and drawings. You can use the default templates and styles provided by AutoCAD Plant 3D 2017 or create your own in Project Setup.
          • -
          • Use data manager and data validation: Data manager and data validation are tools that help you manage and verify the data in your P&IDs and 3D models. You can use data manager to view, edit, filter, sort, export, and import data from various sources. You can use data validation to check for errors, inconsistencies, and conflicts in your data and resolve them accordingly.
          • -
          • Use collaboration tools: Collaboration tools are tools that help you share and coordinate your plant design projects with other team members and stakeholders. You can use collaboration tools such as BIM 360, Vault, Navisworks, etc. to store, access, review, comment, markup, compare, and synchronize your plant design data in a cloud-based common data environment.
          • -
          -

          These are some of the tips and tricks for optimizing AutoCAD Plant 3D 2017 performance and functionality. For more information, you can visit the official help page or contact Autodesk support.

          -

          Conclusion and summary of the article

          -

          In this article, you have learned how to activate AutoCAD Plant 3D 2017 64bit with a zip file. You have also learned what AutoCAD Plant 3D 2017 is, why you need it, how to download and install it from a zip file, and how to troubleshoot common issues with it. You have also learned some features and benefits of AutoCAD Plant 3D 2017, some system requirements and compatibility details, and some tips and tricks for optimizing your plant design experience.

          -

          AutoCAD Plant 3D 2017 is a powerful and industry-specific toolset for creating and editing P&IDs, 3D models, and piping isometrics. It can help you improve your plant design workflow, accuracy, efficiency, and productivity. It can also help you comply with industry standards and regulations, reduce errors and rework, and enhance collaboration and communication among project teams.

          -

          We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. Thank you for reading and happy plant designing!

          -

          FAQs about AutoCAD Plant 3D 2017 activation and installation

          -

          Here are some frequently asked questions and answers about AutoCAD Plant 3D 2017 activation and installation:

          -
            -
          • Q: How can I get a free trial of AutoCAD Plant 3D 2017?
          • -
          • A: You can get a free trial of AutoCAD Plant 3D 2017 from your Autodesk Trial account. You can access your Autodesk Trial account here and sign in with your email and password. You can then go to the Free Trials section and find AutoCAD Plant 3D 2017 in the list of available software. You can click on the Download Free Trial button and choose the Browser Download option. This will download a zip file to your computer. You can then follow the installation steps as described in this article. The free trial will last for 30 days from the date of activation.
          • -
          • Q: How can I renew or extend my license for AutoCAD Plant 3D 2017?
          • -
          • A: You can renew or extend your license for AutoCAD Plant 3D 2017 from your Autodesk account. You can access your Autodesk account here and sign in with your email and password. You can then go to the Products & Services section and find AutoCAD Plant 3D 2017 in the list of your products. You can click on the Renew or Extend button and follow the instructions to complete the process. You can also contact your Autodesk reseller or partner for more options.
          • -
          • Q: How can I uninstall or remove AutoCAD Plant 3D 2017 from my computer?
          • -
          • A: You can uninstall or remove AutoCAD Plant 3D 2017 from your computer by using the Add/Remove Programs feature in Windows. Here are the steps to uninstall or remove AutoCAD Plant 3D 2017 from your computer:
          • -
              -
                1. Close AutoCAD Plant 3D 2017 if it is running.
                2. Go to the Start menu and click on Control Panel.
                3. Select Add/Remove Programs or Programs and Features.
                4. Find AutoCAD Plant 3D 2017 in the list of installed programs and click on it.
                5. Click on the Uninstall/Change button and follow the instructions to complete the process.
    
            -
          • Q: How can I contact Autodesk support for more help with AutoCAD Plant 3D 2017?
          • -
          • A: You can contact Autodesk support for more help with AutoCAD Plant 3D 2017 by using one of these methods:
          • -
              -
            • Email: You can send an email to Autodesk support at support@autodesk.com. You can also use the online form here.
            • -
            • Phone: You can call Autodesk support at +1-800-964-6432 (toll-free in the US) or +1-415-507-5000 (international). You can also find the phone number for your region here.
            • -
            • Chat: You can chat with Autodesk support online by clicking on the Contact Us button on the bottom right corner of the official website or the official help page.
            • -
            • Forum: You can post your questions or issues on the official forum and get answers from other users and experts.
            • -
            • Knowledge Base: You can search for solutions and articles on the official knowledge base and find answers to common questions and issues.
            • -
            -
          -

          These are some of the FAQs and answers about AutoCAD Plant 3D 2017 activation and installation. For more information, you can visit the official help page or contact Autodesk support.

          b2dd77e56b
          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Connectify Hotspot Pro Crack (2020) The Best Way to Connect All Your Devices to the Web.md b/spaces/raedeXanto/academic-chatgpt-beta/Connectify Hotspot Pro Crack (2020) The Best Way to Connect All Your Devices to the Web.md deleted file mode 100644 index 4a15000f35d48d92b01705e526c9a57ef1b43ec6..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Connectify Hotspot Pro Crack (2020) The Best Way to Connect All Your Devices to the Web.md +++ /dev/null @@ -1,108 +0,0 @@ - -

          Connectify Hotspot Pro Crack (2020): What Is It and Why You Need It

          -

          If you are looking for a way to share your internet connection with other devices, you might have heard of Connectify Hotspot Pro. This is a popular software that allows you to turn your PC into a Wi-Fi hotspot and share your internet with your smartphone, tablet, laptop, or any other device that supports Wi-Fi. But what if you don't want to pay for the premium features of this software? That's where Connectify Hotspot Pro Crack (2020) comes in.

          -

          Connectify Hotspot Pro Crack (2020)


    



          -

            In this article, we will explain what Connectify Hotspot Pro Crack (2020) is, how to download and install it, how to use it, what its pros and cons are, and what some alternatives to it are. By the end of this article, you will have a clear idea of whether Connectify Hotspot Pro Crack (2020) is worth trying or not.
    

          -

          How to Download and Install Connectify Hotspot Pro Crack (2020)

          -

          Before we get into the details of how to use Connectify Hotspot Pro Crack (2020), let's first see how to download and install it on your PC. Here are the steps you need to follow:

          -
            -
          1. Find a reliable source for the crack file. There are many websites that claim to offer Connectify Hotspot Pro Crack (2020), but not all of them are trustworthy. Some of them may contain viruses, malware, or spyware that can harm your PC or steal your personal information. Therefore, you need to be careful when choosing a source for the crack file. You can use a reputable antivirus software or a VPN service to protect yourself from malicious websites.
          2. -
          3. Download the crack file and the original software. Once you have found a reliable source for the crack file, you need to download it along with the original software. The crack file is usually a small file that modifies or replaces some files in the original software to bypass its activation process. The original software is the official version of Connectify Hotspot Pro that you can download from its website or any other trusted source.
          4. -
          5. Install the software and apply the crack file. After downloading both files, you need to install the original software on your PC. Follow the instructions on the screen and complete the installation process. Then, you need to apply the crack file to activate the premium features of Connectify Hotspot Pro. To do this, you need to copy and paste the crack file into the installation folder of Connectify Hotspot Pro. This folder is usually located in C:\Program Files\Connectify or C:\Program Files (x86)\Connectify. After applying the crack file, you should be able to use Connectify Hotspot Pro without any limitations.
          6. -
          -

          How to Use Connectify Hotspot Pro Crack (2020)

          -

          Now that you have installed Connectify Hotspot Pro Crack (2020) on your PC, let's see how to use it to create and manage your own Wi-Fi hotspot. Here are the steps you need to follow:

          -


          -
            -
          1. Launch the software and create a hotspot name and password. After launching Connectify Hotspot Pro on your PC, you will see a simple interface that allows you to create and customize your hotspot. You can choose a name for your hotspot, such as "MyHotspot" or "ConnectifyMe", and a password that only you and your authorized devices can use to connect to it.
          2. Choose the internet source and the devices to connect. Next, you need to choose which internet source you want to share with your devices. You can choose from various options, such as Wi-Fi, Ethernet, Mobile Broadband, VPN, or Dial-Up. You can also choose which devices you want to connect to your hotspot, such as smartphones, tablets, laptops, smart TVs, gaming consoles, or any other device that supports Wi-Fi.
          3. Adjust the advanced settings as needed. Connectify Hotspot Pro also lets you enable or disable internet access for specific devices, monitor network usage and speed, block ads on connected devices, share files and folders across devices, and use bridge mode or repeater mode.
          -

          Pros and Cons of Connectify Hotspot Pro Crack (2020)

          -

          As with any software, there are some pros and cons of using Connectify Hotspot Pro Crack (2020). Let's take a look at some of them:

          -

          Pros

          -
            -
          • Free: One of the main advantages of using Connectify Hotspot Pro Crack (2020) is that it is free. You don't have to pay anything for using all the premium features of Connectify Hotspot Pro.
          • -
          • Unlimited: Another advantage of using Connectify Hotspot Pro Crack (2020) is that it is unlimited. You don't have any restrictions on how many devices you can connect or how much data you can share.
          • -
          • Secure: A third advantage of using Connectify Hotspot Pro Crack (2020) is that it is secure. You can protect your hotspot with a strong password and encrypt your data with WPA2-PSK encryption.
          • -
          • Easy to use: A fourth advantage of using Connectify Hotspot Pro Crack (2020) is that it is easy to use. You don't need any technical skills or knowledge to set up and manage your hotspot.
          • -
          -

          Cons

          -
            -
          • Illegal: One of the main disadvantages of using Connectify Hotspot Pro Crack (2020) is that it is illegal. You are violating the terms and conditions of Connectify by using a cracked version of their software.
          • -
          • Risky: Another disadvantage of using Connectify Hotspot Pro Crack (2020) is that it is risky. You may expose your PC or devices to viruses, malware, spyware, or hackers by downloading or using untrusted sources for the crack file.
          • -
          • Unstable: A third disadvantage of using Connectify Hotspot Pro Crack (2020) is that it is unstable. You may experience crashes, errors, bugs, or compatibility issues by using an outdated or corrupted version of the crack file.
          • -
          • Unsupported: A fourth disadvantage of using Connectify Hotspot Pro Crack (2020) is that it is unsupported. You won't get any updates, fixes, improvements, or customer support from Connectify by using a cracked version of their software.
          • -
          -

          Alternatives to Connectify Hotspot Pro Crack (2020)

          -

          If you are not convinced by using Connectify Hotspot Pro Crack (2020), there are some alternatives that you can try instead. Here are some options:

          -

          Option 1: Buy the official license of Connectify Hotspot Pro

          The first option is to buy the official license of Connectify Hotspot Pro from their website or any other authorized seller. This way, you will get all the benefits of using Connectify Hotspot Pro without any drawbacks. You will be able to enjoy the latest features, updates, fixes, improvements, and customer support from Connectify. You will also be supporting the developers and respecting their intellectual property rights. The price of Connectify Hotspot Pro is $34.98 per year or $49.98 for a lifetime license.

          -

          Option 2: Use other free or paid hotspot software

          -

          Another option is to use other free or paid hotspot software that can perform similar functions as Connectify Hotspot Pro. There are many options available in the market, such as Baidu Wi-Fi Hotspot, MyPublicWiFi, mHotspot, OSToto Hotspot, or Hotspot Shield. Some of these software may have more or less features than Connectify Hotspot Pro, but they can still help you create and manage your own Wi-Fi hotspot. You can compare their features, prices, reviews, and ratings before choosing the best one for your needs.

          -

          Conclusion

          -

          Connectify Hotspot Pro Crack (2020) is a software that allows you to turn your PC into a Wi-Fi hotspot and share your internet with other devices. It has many features that make it attractive to use, such as being free, unlimited, secure, and easy to use. However, it also has many drawbacks that make it risky to use, such as being illegal, unstable, unsupported, and potentially harmful to your PC or devices.

          -

          Therefore, we recommend that you either buy the official license of Connectify Hotspot Pro or use other free or paid hotspot software that can offer similar benefits without the risks. This way, you will be able to enjoy creating and managing your own Wi-Fi hotspot without any worries.

          -

          FAQs

          -
            -
          • Q1: Is Connectify Hotspot Pro Crack (2020) safe to use?
          • -
          • A1: No, Connectify Hotspot Pro Crack (2020) is not safe to use. You may expose your PC or devices to viruses, malware, spyware, or hackers by downloading or using untrusted sources for the crack file. You may also face legal consequences for violating the terms and conditions of Connectify by using a cracked version of their software.
          • -
          • Q2: How can I update Connectify Hotspot Pro Crack (2020)?
          • -
          • A2: You cannot update Connectify Hotspot Pro Crack (2020). You will not get any updates, fixes, improvements, or customer support from Connectify by using a cracked version of their software. You may also experience crashes, errors, bugs, or compatibility issues by using an outdated or corrupted version of the crack file.
          • -
          • Q3: What are the system requirements for Connectify Hotspot Pro Crack (2020)?
          • -
          • A3: The system requirements for Connectify Hotspot Pro Crack (2020) are the same as the original software. You need a PC running Windows 7 or later with a Wi-Fi adapter and an internet connection.
          • -
          • Q4: How can I troubleshoot Connectify Hotspot Pro Crack (2020)?
          • -
          • A4: You cannot troubleshoot Connectify Hotspot Pro Crack (2020). You will not get any customer support from Connectify by using a cracked version of their software. You may also face difficulties in finding reliable sources for the crack file or resolving any issues that may arise from using it.
          • -
          • Q5: Where can I get more information about Connectify Hotspot Pro Crack (2020)?
          • -
          • A5: You can get more information about Connectify Hotspot Pro Crack (2020) from various websites that offer it. However, we advise you to be cautious when visiting these websites as they may contain malicious content or links that can harm your PC or devices.
          • -
          -

          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Dfs Cdma Tool 3.1.0.1 26https Scoutmails.com Index301.php K Dfs Cdma Tool 3.1.0.1 26.md b/spaces/raedeXanto/academic-chatgpt-beta/Dfs Cdma Tool 3.1.0.1 26https Scoutmails.com Index301.php K Dfs Cdma Tool 3.1.0.1 26.md deleted file mode 100644 index 04b8b2d12652e9761ebf71b8a46b57a3e664b678..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Dfs Cdma Tool 3.1.0.1 26https Scoutmails.com Index301.php K Dfs Cdma Tool 3.1.0.1 26.md +++ /dev/null @@ -1,83 +0,0 @@ - -

          DFS CDMA Tool 3.1.0.1 26: What Is It And How To Use It

          -

          Introduction

          -

          If you are looking for a tool that can help you flash or install stock firmware on your Qualcomm devices, or update the PRL (Preferred Roaming List) on your CDMA devices, then you might want to check out DFS CDMA Tool 3.1.0.1 26.

          -



          Download Filehttps://tinourl.com/2uL2nq



          -

          DFS CDMA Tool 3.1.0.1 26 is a small application for Windows computers that allows you to perform various tasks on your Qualcomm and CDMA devices, such as flashing firmware, updating PRL, unlocking SPC, MSL, SPC3, FSK, OTKSL, User Lock, MIN lock, SIM LOCK, programming MIN, MDN, IMSI, NAM, PRL file, phone settings and more.

          -

          In this article, we will explain what DFS CDMA Tool 3.1.0.1 26 is, what its features and benefits are, how to download and install it on your computer, and how to use it to flash firmware and update PRL on your devices.

          -

          What is DFS CDMA Tool 3.1.0.1 26?

          -

          DFS CDMA Tool 3.1.0.1 26 is a software tool created by Keith (mingshi) that allows you to flash or install stock firmware (ROM) on Qualcomm devices and update PRL on CDMA devices.

          -

          Qualcomm devices are those powered by a Qualcomm chipset, such as some models from Samsung, LG, HTC, Motorola, ZTE, Huawei and more.

          -

          -

          CDMA devices are those that use Code Division Multiple Access (CDMA) technology for wireless communication, such as some models sold by carriers like Verizon Wireless, Sprint Nextel, US Cellular and more.

          -

          What are the features of DFS CDMA Tool 3.1.0.1 26?

          -

          DFS CDMA Tool 3.1.0.1 26 has many features that make it a useful tool for Qualcomm and CDMA device users.

          -
            -
          • It is a portable application that does not need to be installed on the computer.
          • -
          • It supports flashing or installing .bin based stock firmware (ROM) on Qualcomm devices.
          • -
          • It supports updating PRL on CDMA devices.
          • -
          • It supports unlocking SPC, MSL, SPC3, FSK, OTKSL, User Lock, MIN lock, SIM LOCK.
          • -
          • It supports programming MIN, MDN, IMSI, NAM, PRL file, phone settings.
          • -
          • It is compatible with all versions of Windows OS from Windows XP to Windows 11 (x32 or x64 bit).
          • -
          -

          What are the benefits of using DFS CDMA Tool 3.1.0.1 26?

          -

          Using DFS CDMA Tool 3.1.0.1 26 can bring you many benefits depending on your needs and goals.

          -
            -
          • You can flash or install stock firmware (ROM) on your Qualcomm device if you want to restore it to its original state or upgrade it to a newer version.
          • -
          • You can update PRL on your CDMA device if you want to improve its network performance or roaming capabilities.
          • -
          • You can unlock SPC, MSL, SPC3, FSK, OTKSL, User Lock, MIN lock, SIM LOCK if you want to use your device with another carrier or network.
          • -
          • You can program MIN, MDN, IMSI, NAM, PRL file, phone settings if you want to customize your device according to your preferences or needs.
          • -
          -

          How to download and install DFS CDMA Tool 3.1.0.1 26?

          -

          Download link and requirements

          -

          To download DFS CDMA Tool 3.1.0.1 26 on your computer,

          -
            -
          1. Click on this link: https://androidmtk.com/cdma-software-download-tool
          2. -
          3. Scroll down to the bottom of the page and click on "v1.07: CDMA_Software_Download_Tool_v1_0_7.zip – Latest".
          4. -
          5. Save the zip file on your computer and extract it using any zip extractor software such as WinRAR or WinZip.
          6. -
          -

          To use DFS CDMA Tool 3.1.0.1 26 on your computer,

          -
            -
          1. You need a Windows computer with an internet connection.
          2. -
          3. You need a USB cable to connect your device to the computer.
          4. -
          5. You need a compatible USB driver for your device installed on the computer.
            You can download the original USB driver compatible with the tool from this link: https://androidmtk.com/download-qualcomm-usb-driver
          6. -
          7. You need a .bin based stock firmware (ROM) file for your Qualcomm device or a PRL file for your CDMA device.
            You can find these files in the official website of your device manufacturer or from other online sources.
          8. -
          -

          Installation steps

          -

          To install DFS CDMA Tool 3.1.0.1 26 on your computer,

          -
            -
          1. Open the extracted folder of the tool and double-click on "SoftDownload (customer_en).exe" to launch the tool.
          2. -
          3. If you see an error message saying "No give the path of the file (version.dll) Re-setup Please.", ignore it since it will disappear once you load the firmware or PRL file in the tool.
          4. -
          5. The tool will open with a simple interface that has four tabs: Port Settings, Programming, SC Tools, and About Us.
          6. -
          -

          Step 3: Go to programming and select PRL update

          -

          Go to the "Programming" tab and click on the "PRL Update" button.

          -

          The tool will show you the current PRL version of your device and the available PRL versions on the server.

          -

          Step 4: Browse and load the PRL file and click on write

          -

          Click on the "Browse" button next to the "PRL File" field.

          -

          Locate and load the PRL file for your device on your computer.

          -

          You can find the PRL file in the official website of your carrier or from other online sources.

          -

          Click on the "Write" button to begin the PRL update process.

          -

          Step 5: Wait for the process to complete and reboot your device

          -

          Wait for the tool to update PRL on your device. This may take a few seconds or minutes depending on the size of the PRL file and the speed of your computer and device.

          -

          Do not disconnect your device from the computer or interrupt the process in any way.

          -

          Once the process is completed, you will see a green message saying "Write Complete".

          -

          Disconnect your device from the computer and reboot it.

          -

          Your device should now have the new PRL version installed.

          -

          Conclusion

          -

          In this article, we have explained what DFS CDMA Tool 3.1.0.1 26 is, what its features and benefits are, how to download and install it on your computer, and how to use it to flash firmware and update PRL on your devices.

          -

          We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

          -

          FAQs

          -
            -
          • Q: Is DFS CDMA Tool 3.1.0.1 26 free to use?
          • -
          • A: Yes, DFS CDMA Tool 3.1.0.1 26 is free to use for personal and non-commercial purposes. However, if you want to use it for commercial purposes, you need to purchase a license from the developer.
          • -
          • Q: Is DFS CDMA Tool 3.1.0.1 26 safe to use?
          • -
          • A: Yes, DFS CDMA Tool 3.1.0.1 26 is safe to use as long as you download it from a trusted source and use it with caution. However, flashing firmware or updating PRL on your devices can be risky and may cause data loss or damage to your devices if done incorrectly. Therefore, we recommend you to backup your data before using the tool and follow the instructions carefully.
          • -
          • Q: What are some alternative tools to DFS CDMA Tool 3.1.0.1 26?
          • -
          • A: There are many other tools that can also help you flash firmware or update PRL on your Qualcomm and CDMA devices, such as QPST Tool, QFIL Tool, QcomDLoader Tool, EFS Professional Tool, QPST Configuration Tool, etc. You can check them all from our Qualcomm Tool section.
          • -
          • Q: How can I contact the developer of DFS CDMA Tool 3.1.0.1 26?
          • -
          • A: You can contact the developer of DFS CDMA Tool 3.1.0.1 26 by visiting their official website at http://www.cdmatool.com/.
          • -
          -

          -
          -
          \ No newline at end of file diff --git a/spaces/rahul-pandey-ct/kinship-llm/README.md b/spaces/rahul-pandey-ct/kinship-llm/README.md deleted file mode 100644 index 89f2b808ab537cf5205b8f79f8400282c4ff558b..0000000000000000000000000000000000000000 --- a/spaces/rahul-pandey-ct/kinship-llm/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Kinship Llm -emoji: 😻 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fantasy Grounds - Pirate Adventurers (5E) Full Crack [Torrent] PATCHED.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fantasy Grounds - Pirate Adventurers (5E) Full Crack [Torrent] PATCHED.md deleted file mode 100644 index 6889bcf6366c48b7be7646bac6eba20c2b792850..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Fantasy Grounds - Pirate Adventurers (5E) Full Crack [Torrent] PATCHED.md +++ /dev/null @@ -1,66 +0,0 @@ -

          Fantasy Grounds - Pirate Adventurers (5E) Full Crack [Torrent]


          Downloadhttps://urlgoal.com/2uCL47



          -
          -## **Pirate Adventurers (5e)** - -Acquisitions Incorporated - -(Dollar) - -Sandbox adventure for 5e (Chapter 3) [] - -**DUNGEONS & DRAGONS ® D&D® - -ADVENTURERS, WIZARDS OF THE COAST ® - -& EBERRON ®** - -**WALK THROUGH THE BOOK OF - -TOME OF TALES®** - -Tome of Tales, Book II - -© 2002 Wizards of the Coast, Inc. - -All characters in this book are fictitious. Any resemblance to actual persons, living or dead, is purely coincidental. - -This book uses trademarks and trade names owned by Wizards of the Coast, Inc., which are used in this book without permission. - -Set in Monotype Geneva - -Printed in the United States of America - -First Printing: April 2002 - -ISBN: 0-7869-1943-2 - -3059 - -10 9 8 7 6 5 4 3 2 1 - -PISFOS FORT STALKERS - -**This book is protected under the copyright laws of the United States of America. Any reproduction or unauthorized use of the material or artwork contained herein is prohibited without the express written permission of Wizards of the Coast, Inc.** - -Published by Wizards of the Coast, Inc. FORGOTTEN REALMS, WIZARDS OF THE COAST, and their respective logos are trademarks of Wizards of the Coast, Inc., in the U.S.A. and other countries. - -All Wizards of the Coast characters and their distinctive likenesses are property of Wizards of the Coast, Inc. - -Cover art by Todd Lockwood - -ISBN: 978-0-7869-1884-1 - -For customer service, contact: - -U.S., Canada, Asia Pacific, & Latin America: Wizards of the Coast, Inc., 77-82 Fulcrum Court, Avenel, NJ 07002-7762 - -Europe: Wizards of the Coast, Inc., Unit D, 600 Whitehall Avenue, Newcastle-upon-Tyne, NE1 7UZ - -Telephone: 800-405-5400 - -Email: customer.service@wizards.com - -Put these funny photos in your favorite social media feeds and see your own funny picture posted 4fefd39f24
          -
          -
          -

          diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Flippingbook Publisher Crack Serial Key BETTER.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Flippingbook Publisher Crack Serial Key BETTER.md deleted file mode 100644 index cc722c90ee7e04b8a1ee16a0ebada1aba6d312ae..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Flippingbook Publisher Crack Serial Key BETTER.md +++ /dev/null @@ -1,9 +0,0 @@ -
          -

          Publisher Console Free Download. FlippingBook Publisher for Mac and Windows. Features a user-friendly interface, and an intuitive set of features that make it easy for beginners to produce high-quality flipbooks on the Web. Publisher Console is a tool developed.

          -

          FlippingBook Console. FlippingBook Publisher. FlippingBook Console. FlippingBook Serial Code. FlippingBook Console 1.0 Serial Code FlippingBook Serial Code. FlippingBook Console 1.0.2 Serial Code FlippingBook Serial Code. FlippingBook Console 1.1. Serial Code FlippingBook Serial Code. FlippingBook Console 1.2. Serial Code.

          -

          flippingbook publisher crack serial key


          Downloadhttps://urlgoal.com/2uCLDo



          -

          FlippingBook Console. FlippingBook Publisher. FlippingBook Console.FlippingBook Console 1.1.2. Serial Code FlippingBook Serial Code. FlippingBook Console 1.1.1. Serial Code. FlippingBook Console 1.2. Serial Code. FlippingBook Console 1.0.2 Serial Code. FlippingBook Console 1.0 Serial Code FlippingBook Serial Code. FlippingBook Console 1.0.1 Serial Code. FlippingBook Console 1.0 Serial Code FlippingBook Serial Code.

          -

          The serial numbers, registration codes, crack or keygen (pirate key) that you see in torrent sites, other download portals or download related sites are not from FlippingBook Publisher 2.4.37 publisher, these are given by other independent third parties.

          -

          These serial numbers or registration codes are valid only for FlippingBook Publisher 2.4.37 version, and are for the authorized user only. Please use these serial numbers or registration codes at your own risk, because any damage or loss caused will be solely yours. If you like this FlippingBook Publisher 2.4.37 crack, then you can share it with your friends!

          -
          -
          \ No newline at end of file diff --git a/spaces/rinong/StyleGAN-NADA/e4e/scripts/inference.py b/spaces/rinong/StyleGAN-NADA/e4e/scripts/inference.py deleted file mode 100644 index 185b9b34db85dcd97b9793bd5dbfc9d1ca046549..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/e4e/scripts/inference.py +++ /dev/null @@ -1,133 +0,0 @@ -import argparse - -import torch -import numpy as np -import sys -import os -import dlib - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs, paths_config -from datasets.inference_dataset import InferenceDataset -from torch.utils.data import DataLoader -from utils.model_utils import setup_model -from utils.common import tensor2im -from utils.alignment import align_face -from PIL import Image - - -def main(args): - net, opts = setup_model(args.ckpt, device) - is_cars = 'cars_' in opts.dataset_type - generator = net.decoder - generator.eval() - args, data_loader = setup_data_loader(args, opts) - - # Check if latents exist - latents_file_path = os.path.join(args.save_dir, 'latents.pt') - if os.path.exists(latents_file_path): - latent_codes = torch.load(latents_file_path).to(device) - else: - latent_codes = get_all_latents(net, data_loader, args.n_sample, is_cars=is_cars) - torch.save(latent_codes, latents_file_path) - - if not args.latents_only: - generate_inversions(args, generator, latent_codes, is_cars=is_cars) - - -def setup_data_loader(args, opts): - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - images_path = args.images_dir if args.images_dir is not None else dataset_args['test_source_root'] - print(f"images path: {images_path}") - align_function = None - if args.align: - align_function = run_alignment - test_dataset = InferenceDataset(root=images_path, - transform=transforms_dict['transform_test'], - preprocess=align_function, - opts=opts) - - data_loader = DataLoader(test_dataset, - batch_size=args.batch, - shuffle=False, - num_workers=2, - drop_last=True) - - print(f'dataset length: {len(test_dataset)}') - - if args.n_sample is None: - args.n_sample = len(test_dataset) - return args, data_loader - - -def get_latents(net, x, is_cars=False): - codes = net.encoder(x) - if net.opts.start_from_latent_avg: - if codes.ndim == 2: - codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :] - else: - codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1) - if codes.shape[1] == 18 and is_cars: - codes = codes[:, :16, :] - return codes - - -def get_all_latents(net, data_loader, n_images=None, is_cars=False): - all_latents = [] - i = 0 - with torch.no_grad(): - for batch in data_loader: - if n_images is not None and i > n_images: - break - x = batch - inputs = x.to(device).float() - latents = get_latents(net, inputs, is_cars) - all_latents.append(latents) - i += len(latents) - return torch.cat(all_latents) - - -def save_image(img, save_dir, idx): - result = tensor2im(img) - im_save_path = os.path.join(save_dir, f"{idx:05d}.jpg") - Image.fromarray(np.array(result)).save(im_save_path) - - -@torch.no_grad() -def generate_inversions(args, g, latent_codes, is_cars): - print('Saving inversion images') - inversions_directory_path = os.path.join(args.save_dir, 'inversions') - os.makedirs(inversions_directory_path, exist_ok=True) - for i in range(args.n_sample): - imgs, _ = g([latent_codes[i].unsqueeze(0)], input_is_latent=True, randomize_noise=False, return_latents=True) - if is_cars: - imgs = imgs[:, :, 64:448, :] - 
save_image(imgs[0], inversions_directory_path, i + 1) - - -def run_alignment(image_path): - predictor = dlib.shape_predictor(paths_config.model_paths['shape_predictor']) - aligned_image = align_face(filepath=image_path, predictor=predictor) - print("Aligned image has shape: {}".format(aligned_image.size)) - return aligned_image - - -if __name__ == "__main__": - device = "cuda" - - parser = argparse.ArgumentParser(description="Inference") - parser.add_argument("--images_dir", type=str, default=None, - help="The directory of the images to be inverted") - parser.add_argument("--save_dir", type=str, default=None, - help="The directory to save the latent codes and inversion images. (default: images_dir") - parser.add_argument("--batch", type=int, default=1, help="batch size for the generator") - parser.add_argument("--n_sample", type=int, default=None, help="number of the samples to infer.") - parser.add_argument("--latents_only", action="store_true", help="infer only the latent codes of the directory") - parser.add_argument("--align", action="store_true", help="align face images before inference") - parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to generator checkpoint") - - args = parser.parse_args() - main(args) diff --git a/spaces/rizam/literature-research-tool/lrt/clustering/models/__init__.py b/spaces/rizam/literature-research-tool/lrt/clustering/models/__init__.py deleted file mode 100644 index 1f8891b59d47baa35bcc3b1c628ea5ecfe797d5d..0000000000000000000000000000000000000000 --- a/spaces/rizam/literature-research-tool/lrt/clustering/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .keyBartPlus import KeyBartAdapter \ No newline at end of file diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/focalnet_dino/models/dino/util/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/focalnet_dino/models/dino/util/__init__.py deleted file mode 100644 index 168f9979a4623806934b0ff1102ac166704e7dec..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/focalnet_dino/models/dino/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Film Ajab Prem Ki Ghazab Kahani Full Movies A Romantic Comedy with Ranbir Kapoor and Katrina Kaif.md b/spaces/rorallitri/biomedical-language-models/logs/Download Film Ajab Prem Ki Ghazab Kahani Full Movies A Romantic Comedy with Ranbir Kapoor and Katrina Kaif.md deleted file mode 100644 index dc8752ef77182ac3de21a8d9bd394b518a0fdec7..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Film Ajab Prem Ki Ghazab Kahani Full Movies A Romantic Comedy with Ranbir Kapoor and Katrina Kaif.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Download Film Ajab Prem Ki Ghazab Kahani Full Movies


          Download File >>> https://tinurll.com/2uzm6M



          -
          -
          -
          -

          diff --git a/spaces/rorallitri/biomedical-language-models/logs/Lingobit Localizer 7 1 Extra Quality Crack 12.md b/spaces/rorallitri/biomedical-language-models/logs/Lingobit Localizer 7 1 Extra Quality Crack 12.md deleted file mode 100644 index d256e6bc40daa5c4f03d9d3d81ebe538c5d52e6b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Lingobit Localizer 7 1 Extra Quality Crack 12.md +++ /dev/null @@ -1,6 +0,0 @@ -

          lingobit localizer 7 1 crack 12


          DOWNLOADhttps://tinurll.com/2uzmVx



          -
          -[Res.1] ▽ name [HOME], 11/09/02(金) 00:12. lol . ... Lingobit Localizer Enterprise 5.5 meilleur prix . ... Microsoft Windows 7 ULTIMATE x32 German crack serial number . cd key MetaProducts Offline Explorer Enterprise 5.3 Multilingual . 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/sayakpaul/sots-indoor-dehazing-maxim/maxim/blocks/unet.py b/spaces/sayakpaul/sots-indoor-dehazing-maxim/maxim/blocks/unet.py deleted file mode 100644 index 6000e05ae4472df5191a7af890b4d9274271081f..0000000000000000000000000000000000000000 --- a/spaces/sayakpaul/sots-indoor-dehazing-maxim/maxim/blocks/unet.py +++ /dev/null @@ -1,133 +0,0 @@ -import functools - -import tensorflow as tf -from tensorflow.keras import layers - -from .attentions import RCAB -from .misc_gating import CrossGatingBlock, ResidualSplitHeadMultiAxisGmlpLayer - -Conv1x1 = functools.partial(layers.Conv2D, kernel_size=(1, 1), padding="same") -Conv3x3 = functools.partial(layers.Conv2D, kernel_size=(3, 3), padding="same") -ConvT_up = functools.partial( - layers.Conv2DTranspose, kernel_size=(2, 2), strides=(2, 2), padding="same" -) -Conv_down = functools.partial( - layers.Conv2D, kernel_size=(4, 4), strides=(2, 2), padding="same" -) - - -def UNetEncoderBlock( - num_channels: int, - block_size, - grid_size, - num_groups: int = 1, - lrelu_slope: float = 0.2, - block_gmlp_factor: int = 2, - grid_gmlp_factor: int = 2, - input_proj_factor: int = 2, - channels_reduction: int = 4, - dropout_rate: float = 0.0, - downsample: bool = True, - use_global_mlp: bool = True, - use_bias: bool = True, - use_cross_gating: bool = False, - name: str = "unet_encoder", -): - """Encoder block in MAXIM.""" - - def apply(x, skip=None, enc=None, dec=None): - if skip is not None: - x = tf.concat([x, skip], axis=-1) - - # convolution-in - x = Conv1x1(filters=num_channels, use_bias=use_bias, name=f"{name}_Conv_0")(x) - shortcut_long = x - - for i in range(num_groups): - if use_global_mlp: - x = ResidualSplitHeadMultiAxisGmlpLayer( - grid_size=grid_size, - block_size=block_size, - grid_gmlp_factor=grid_gmlp_factor, - block_gmlp_factor=block_gmlp_factor, - input_proj_factor=input_proj_factor, - use_bias=use_bias, - dropout_rate=dropout_rate, - name=f"{name}_SplitHeadMultiAxisGmlpLayer_{i}", - )(x) - x = RCAB( - num_channels=num_channels, - reduction=channels_reduction, - lrelu_slope=lrelu_slope, - use_bias=use_bias, - name=f"{name}_channel_attention_block_1{i}", - )(x) - - x = x + shortcut_long - - if enc is not None and dec is not None: - assert use_cross_gating - x, _ = CrossGatingBlock( - features=num_channels, - block_size=block_size, - grid_size=grid_size, - dropout_rate=dropout_rate, - input_proj_factor=input_proj_factor, - upsample_y=False, - use_bias=use_bias, - name=f"{name}_cross_gating_block", - )(x, enc + dec) - - if downsample: - x_down = Conv_down( - filters=num_channels, use_bias=use_bias, name=f"{name}_Conv_1" - )(x) - return x_down, x - else: - return x - - return apply - - -def UNetDecoderBlock( - num_channels: int, - block_size, - grid_size, - num_groups: int = 1, - lrelu_slope: float = 0.2, - block_gmlp_factor: int = 2, - grid_gmlp_factor: int = 2, - input_proj_factor: int = 2, - channels_reduction: int = 4, - dropout_rate: float = 0.0, - downsample: bool = True, - use_global_mlp: bool = True, - use_bias: bool = True, - name: str = "unet_decoder", -): - - """Decoder block in MAXIM.""" - - def apply(x, bridge=None): - x = ConvT_up( - filters=num_channels, use_bias=use_bias, name=f"{name}_ConvTranspose_0" - )(x) - x = UNetEncoderBlock( - num_channels=num_channels, - num_groups=num_groups, - lrelu_slope=lrelu_slope, - block_size=block_size, - grid_size=grid_size, - block_gmlp_factor=block_gmlp_factor, - grid_gmlp_factor=grid_gmlp_factor, - channels_reduction=channels_reduction, - 
use_global_mlp=use_global_mlp, - dropout_rate=dropout_rate, - downsample=False, - use_bias=use_bias, - name=f"{name}_UNetEncoderBlock_0", - )(x, skip=bridge) - - return x - - return apply diff --git a/spaces/scedlatioru/img-to-music/Lektyra-Shkollore-8-Lutjet-E-Mbremjes-Kuptimirar-TOP.md b/spaces/scedlatioru/img-to-music/Lektyra-Shkollore-8-Lutjet-E-Mbremjes-Kuptimirar-TOP.md deleted file mode 100644 index 40bd32e00c5dd8c950015c36159924b8efb11b50..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/Lektyra-Shkollore-8-Lutjet-E-Mbremjes-Kuptimirar-TOP.md +++ /dev/null @@ -1,64 +0,0 @@ -## Lektyra Shkollore 8 Lutjet E Mbremjes Kuptimirar - - - - - - - - - -**Download File --->>> [https://dropnobece.blogspot.com/?download=2tyq3G](https://dropnobece.blogspot.com/?download=2tyq3G)** - - - - - - - - - - - - Hello, this is Bing. I can help you write a title and an article with HTML formatting for the keyword "Lektyra Shkollore 8 Lutjet E Mbremjes Kuptimirar". Here is a possible example: - -# Lektyra Shkollore 8: Lutjet e Mbremjes by Anton Pashku - - - -Lutjet e Mbremjes (The Evening Prayers) is a novel by Anton Pashku, one of the most prominent Albanian writers of the 20th century. It was published in 1973 and is considered a masterpiece of modern Albanian literature. The novel explores the themes of identity, alienation, freedom, and resistance in a totalitarian society. - - - -The novel is set in a fictional city called Qyteti i Ri (The New City), which is modeled after Tirana, the capital of Albania under the communist regime. The protagonist is a young man named Gjergj, who works as a librarian and lives with his mother and sister. He is dissatisfied with his life and feels oppressed by the system that controls every aspect of his existence. He dreams of escaping to the West, where he imagines he can find happiness and fulfillment. - - - -One night, he meets a mysterious woman named Nora, who claims to be a foreigner and invites him to her apartment. There, he discovers that she is actually a member of an underground resistance movement that fights against the regime. She introduces him to other rebels, who share their stories and ideals with him. Gjergj is fascinated by their courage and passion, and decides to join them in their struggle. - - - -However, he soon realizes that the resistance is not as simple and noble as he thought. He faces betrayal, violence, torture, and death. He also learns that Nora has a dark secret that could endanger their lives. He begins to question his own motives and beliefs, and wonders if he has made the right choice. - - - -Lutjet e Mbremjes is a powerful and poetic novel that depicts the human condition in a time of oppression and despair. It challenges the reader to reflect on the meaning of freedom, love, and sacrifice. It also celebrates the spirit of resistance and hope that can never be extinguished. - -I can try to continue the article. Here is a possible example: - -The third chapter of the book, "Krokatja e korbit" (The Crow's Caw), contains stories that deal with the theme of violence and its effects on individuals and society. In these stories, Pashku exposes the brutality and injustice of the communist regime, as well as the moral degradation and psychological trauma of its victims. Some of the stories are based on real events, such as the execution of a group of intellectuals in 1956 ("Një ditë e zakonshme"), or the torture and imprisonment of a dissident writer ("Një breshkë nën ujë"). 
Other stories are more symbolic and allegorical, such as "Krokatja e korbit", which depicts a surreal scene of a crow cawing in front of a firing squad, or "Një vajzë me sy të mrekullueshëm", which portrays a young girl who is raped and killed by a soldier. - - - -The last chapter of the book, "Njeriu lakuriq" (The Naked Man), consists of stories that explore the theme of loneliness and isolation in a dehumanized society. In these stories, Pashku shows the existential angst and despair of his characters, who are unable to find meaning and connection in their lives. They are either alienated from themselves, such as the protagonist of "Njeriu lakuriq", who strips naked in front of a crowd and feels nothing, or from others, such as the protagonist of "Dy fjalë për një plak" (Two Words for an Old Man), who has no one to talk to except his dead wife's portrait. They are also haunted by their past memories, dreams, and fantasies, which contrast with their bleak reality. - - - -Lutjet e Mbremjes is a collection of stories that reflects the author's personal vision and experience of life under a totalitarian regime. It is also a testimony of his artistic talent and courage, as he defied censorship and repression with his original and innovative style. The stories are rich in imagery, symbolism, irony, and humor, as well as in emotion and insight. They reveal the human face and voice behind the official propaganda and ideology. They are also universal in their appeal, as they touch upon themes that are relevant to any human being who seeks freedom, dignity, and love. - - dfd1c89656 - - - - - diff --git a/spaces/scedlatioru/img-to-music/example/Nuendo 6 Full Release PORTABLE Crack By Team AiR Torret.md b/spaces/scedlatioru/img-to-music/example/Nuendo 6 Full Release PORTABLE Crack By Team AiR Torret.md deleted file mode 100644 index 04d3f55642625aacd6e3c0e1c45bec657723f829..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Nuendo 6 Full Release PORTABLE Crack By Team AiR Torret.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Nuendo 6 Full Release Crack By Team AiR Torret


          Download >>> https://gohhs.com/2uEAHZ



          - -__TOP__ Nuendo 6 Full Release Crack By Team AiR Torret - SANG PEMBURU. Podcast Unavailable. Nuendo 4 recording software trial version downloads ... 1fdad05405
          -
          -
          -

          diff --git a/spaces/seduerr/text_analytics/text_analytics/pipes/polites_tagger.py b/spaces/seduerr/text_analytics/text_analytics/pipes/polites_tagger.py deleted file mode 100644 index b309a931648278a34fc622ad3c07c034804cc933..0000000000000000000000000000000000000000 --- a/spaces/seduerr/text_analytics/text_analytics/pipes/polites_tagger.py +++ /dev/null @@ -1,58 +0,0 @@ -from spacy.matcher import PhraseMatcher -from spacy.tokens import Doc -from spacy.tokens import Span -from spacy.util import filter_spans -from spacy.language import Language - -from text_analytics.constants import ACCEPTED_LANGUAGES - -polites_getter = lambda doc: [doc[span['start']:span['end']] - for span in doc._.polites_span_indices] - -Doc.set_extension('polites_span_indices', force=False, default=[]) -Doc.set_extension('polites', force=False, getter=polites_getter) - -@Language.factory('polites tagger') -class PolitesTagger: - def __init__(self, name, nlp, language: str='en') -> None: - ''' - This constructor will initialize the object that tags polites connectives. - - Parameters: - nlp: The Spacy model to use this tagger with. - language: The language that this pipeline will be used in. - - Returns: - None. - ''' - if not language in ACCEPTED_LANGUAGES: - raise ValueError(f'Language {language} is not supported yet') - - self._language = language - self._matcher = PhraseMatcher(nlp.vocab, attr='LOWER') - self._connectives = [] - if language == 'en': # question words and questionmark - self._connectives = ['help', 'please', 'thanks', 'thank you', 'excuse me', 'respectful', 'kind', 'pardon', 'dear sir or madam', 'dearest', 'dear'] - else: # Support for future languages - pass - - for con in self._connectives: - self._matcher.add(con, None, nlp(con)) - - - def __call__(self, doc: Doc) -> Doc: - ''' - This method will find all polites connectives and store them in an iterable. - - Parameters: - doc(Doc): A Spacy document. - ''' - matches = self._matcher(doc) - polites_spans = [doc[start:end] for _, start, end in matches] - - doc._.polites_span_indices = [{'start': span.start, - 'end': span.end, - 'label': span.label} - for span in filter_spans(polites_spans)] # Save the polites connectives found - - return doc \ No newline at end of file diff --git a/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp b/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp deleted file mode 100644 index 0ac0fa7bc3ced0447ba4caa359355dd4252670b3..0000000000000000000000000000000000000000 --- a/spaces/shencc/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include "libipc/buffer.h" -#include "libipc/utility/pimpl.h" - -#include - -namespace ipc { - -bool operator==(buffer const & b1, buffer const & b2) { - return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0); -} - -bool operator!=(buffer const & b1, buffer const & b2) { - return !(b1 == b2); -} - -class buffer::buffer_ : public pimpl { -public: - void* p_; - std::size_t s_; - void* a_; - buffer::destructor_t d_; - - buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a) - : p_(p), s_(s), a_(a), d_(d) { - } - - ~buffer_() { - if (d_ == nullptr) return; - d_((a_ == nullptr) ? 
p_ : a_, s_); - } -}; - -buffer::buffer() - : buffer(nullptr, 0, nullptr, nullptr) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d) - : p_(p_->make(p, s, d, nullptr)) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional) - : p_(p_->make(p, s, d, additional)) { -} - -buffer::buffer(void* p, std::size_t s) - : buffer(p, s, nullptr) { -} - -buffer::buffer(char const & c) - : buffer(const_cast(&c), 1) { -} - -buffer::buffer(buffer&& rhs) - : buffer() { - swap(rhs); -} - -buffer::~buffer() { - p_->clear(); -} - -void buffer::swap(buffer& rhs) { - std::swap(p_, rhs.p_); -} - -buffer& buffer::operator=(buffer rhs) { - swap(rhs); - return *this; -} - -bool buffer::empty() const noexcept { - return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0); -} - -void* buffer::data() noexcept { - return impl(p_)->p_; -} - -void const * buffer::data() const noexcept { - return impl(p_)->p_; -} - -std::size_t buffer::size() const noexcept { - return impl(p_)->s_; -} - -} // namespace ipc diff --git a/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/data/tools/hhblits.py b/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/data/tools/hhblits.py deleted file mode 100644 index e0aa098a6f6a2e702340aafbde7a5a045b674543..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/data/tools/hhblits.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Library to run HHblits from Python.""" - -import glob -import os -import subprocess -from typing import Any, Mapping, Optional, Sequence - -from absl import logging -from alphafold.data.tools import utils -# Internal import (7716). - - -_HHBLITS_DEFAULT_P = 20 -_HHBLITS_DEFAULT_Z = 500 - - -class HHBlits: - """Python wrapper of the HHblits binary.""" - - def __init__(self, - *, - binary_path: str, - databases: Sequence[str], - n_cpu: int = 4, - n_iter: int = 3, - e_value: float = 0.001, - maxseq: int = 1_000_000, - realign_max: int = 100_000, - maxfilt: int = 100_000, - min_prefilter_hits: int = 1000, - all_seqs: bool = False, - alt: Optional[int] = None, - p: int = _HHBLITS_DEFAULT_P, - z: int = _HHBLITS_DEFAULT_Z): - """Initializes the Python HHblits wrapper. - - Args: - binary_path: The path to the HHblits executable. - databases: A sequence of HHblits database paths. This should be the - common prefix for the database files (i.e. up to but not including - _hhm.ffindex etc.) - n_cpu: The number of CPUs to give HHblits. - n_iter: The number of HHblits iterations. - e_value: The E-value, see HHblits docs for more details. - maxseq: The maximum number of rows in an input alignment. Note that this - parameter is only supported in HHBlits version 3.1 and higher. - realign_max: Max number of HMM-HMM hits to realign. HHblits default: 500. - maxfilt: Max number of hits allowed to pass the 2nd prefilter. - HHblits default: 20000. - min_prefilter_hits: Min number of hits to pass prefilter. - HHblits default: 100. 
- all_seqs: Return all sequences in the MSA / Do not filter the result MSA. - HHblits default: False. - alt: Show up to this many alternative alignments. - p: Minimum Prob for a hit to be included in the output hhr file. - HHblits default: 20. - z: Hard cap on number of hits reported in the hhr file. - HHblits default: 500. NB: The relevant HHblits flag is -Z not -z. - - Raises: - RuntimeError: If HHblits binary not found within the path. - """ - self.binary_path = binary_path - self.databases = databases - - for database_path in self.databases: - if not glob.glob(database_path + '_*'): - logging.error('Could not find HHBlits database %s', database_path) - raise ValueError(f'Could not find HHBlits database {database_path}') - - self.n_cpu = n_cpu - self.n_iter = n_iter - self.e_value = e_value - self.maxseq = maxseq - self.realign_max = realign_max - self.maxfilt = maxfilt - self.min_prefilter_hits = min_prefilter_hits - self.all_seqs = all_seqs - self.alt = alt - self.p = p - self.z = z - - def query(self, input_fasta_path: str) -> Mapping[str, Any]: - """Queries the database using HHblits.""" - with utils.tmpdir_manager(base_dir='/tmp') as query_tmp_dir: - a3m_path = os.path.join(query_tmp_dir, 'output.a3m') - - db_cmd = [] - for db_path in self.databases: - db_cmd.append('-d') - db_cmd.append(db_path) - cmd = [ - self.binary_path, - '-i', input_fasta_path, - '-cpu', str(self.n_cpu), - '-oa3m', a3m_path, - '-o', '/dev/null', - '-n', str(self.n_iter), - '-e', str(self.e_value), - '-maxseq', str(self.maxseq), - '-realign_max', str(self.realign_max), - '-maxfilt', str(self.maxfilt), - '-min_prefilter_hits', str(self.min_prefilter_hits)] - if self.all_seqs: - cmd += ['-all'] - if self.alt: - cmd += ['-alt', str(self.alt)] - if self.p != _HHBLITS_DEFAULT_P: - cmd += ['-p', str(self.p)] - if self.z != _HHBLITS_DEFAULT_Z: - cmd += ['-Z', str(self.z)] - cmd += db_cmd - - logging.info('Launching subprocess "%s"', ' '.join(cmd)) - process = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - with utils.timing('HHblits query'): - stdout, stderr = process.communicate() - retcode = process.wait() - - if retcode: - # Logs have a 15k character limit, so log HHblits error line by line. - logging.error('HHblits failed. HHblits stderr begin:') - for error_line in stderr.decode('utf-8').splitlines(): - if error_line.strip(): - logging.error(error_line.strip()) - logging.error('HHblits stderr end') - raise RuntimeError('HHblits failed\nstdout:\n%s\n\nstderr:\n%s\n' % ( - stdout.decode('utf-8'), stderr[:500_000].decode('utf-8'))) - - with open(a3m_path) as f: - a3m = f.read() - - raw_output = dict( - a3m=a3m, - output=stdout, - stderr=stderr, - n_iter=self.n_iter, - e_value=self.e_value) - return raw_output diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/1 vs 100 Mod APK The Ultimate Quiz Game for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/1 vs 100 Mod APK The Ultimate Quiz Game for Android.md deleted file mode 100644 index bf7559c39ada48ba1065a5ec325df70462b365db..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/1 vs 100 Mod APK The Ultimate Quiz Game for Android.md +++ /dev/null @@ -1,109 +0,0 @@ - -

          1 vs 100 Mod APK: A Fun and Challenging Quiz Game

          -

          Do you love trivia games? Do you enjoy testing your knowledge and competing with others? If you answered yes, then you might want to try out 1 vs 100 mod apk, a mobile game based on the popular TV show of the same name. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, and how to play it. Let's get started!

          -

          1 vs 100 mod apk


          Download Ziphttps://ssurll.com/2uNYKj



          -

          What is 1 vs 100?

          -

          Before we dive into the details of the mod apk, let's first understand what 1 vs 100 is all about.

          -

          The original TV show

          -

          1 vs 100 is a TV game show that originated in the Netherlands in 2006 and has since been adapted in many countries around the world. The premise of the show is simple: one contestant (the "one") faces off against a group of 100 people (the "mob") in a series of multiple-choice questions. The one can win a large cash prize by eliminating all the mob members, or lose everything by answering incorrectly. The mob members can also win a share of the prize money by surviving until the end.

          -

          The mobile game adaptation

          -

          In 2017, a mobile game version of 1 vs 100 was released by Blitzzer Games for Android devices. The game follows the same format as the TV show, but with some differences. For example, the questions are more varied and cover topics such as history, geography, science, entertainment, sports, and more. The game also features different modes, such as solo mode, online mode, and party mode, where you can play with your friends or other players online. The game is free to download and play, but it also offers in-app purchases and ads.

          -

          What is a mod apk?

          -

          A mod apk is a modified version of an original app or game that has been altered by someone to add or remove certain features. For example, a mod apk may unlock premium content, remove ads, increase coins or gems, or enable cheats. A mod apk is usually downloaded from third-party websites that are not affiliated with the official developers or publishers of the app or game.

          -

          The benefits of using a mod apk

          -

          Some of the benefits of using a mod apk are:

          -


          -
            -
          • You can access features that are otherwise unavailable or restricted in the original app or game.
          • -
          • You can enhance your gaming experience by customizing the app or game to your preferences.
          • -
          • You can save money by avoiding in-app purchases or subscriptions.
          • -
          -

          The risks of using a mod apk

          -

          Some of the risks of using a mod apk are:

          -
            -
          • You may violate the terms and conditions of the original app or game and get banned or suspended from using it.
          • -
          • You may expose your device to malware or viruses that can harm your data or privacy.
          • -
          • You may encounter bugs or errors that can affect the performance or functionality of the app or game.
          • -
          -

          How to download and install 1 vs 100 mod apk?

          -

          If you want to try out 1 vs 100 mod apk, you will need to follow these steps:

          -

          Step 1: Find a reliable source

          -

          As mentioned earlier, a mod apk is not available on the official app store, so you will need to find a trustworthy website that offers it. You can search for "1 vs 100 mod apk" on Google or any other search engine and look for the results that have positive reviews and ratings. Alternatively, you can use the link below to download the mod apk from a reputable source. However, you should always be careful and cautious when downloading anything from the internet, as some websites may contain harmful or fraudulent content.

          -

          Step 2: Enable unknown sources

          -

          Before you can install the mod apk, you will need to enable unknown sources on your device. This is a security setting that allows you to install apps or games that are not from the official app store. To do this, go to your device's settings and look for the option that says "Security" or "Privacy". Then, find the toggle that says "Unknown sources" or "Allow installation of apps from unknown sources" and turn it on. You may see a warning message that tells you about the potential risks of enabling this option, but you can ignore it if you trust the source of the mod apk.

          -

          Step 3: Download and install the file

          -

          Once you have enabled unknown sources, you can proceed to download and install the mod apk file. To do this, go to the website where you found the mod apk and tap on the download button. You may see a pop-up window that asks you to confirm the download, so tap on "OK" or "Yes". The file will start downloading and you can see its progress on your notification bar. When the download is complete, tap on the file name and follow the instructions to install it. You may see another pop-up window that asks you to grant permissions to the app, so tap on "Install" or "Allow". The installation will take a few seconds and you will see a message that says "App installed" when it is done.
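          For readers who prefer sideloading from a computer instead of tapping through the phone's file manager, the short sketch below shows one way to do it with Python driving Android's adb tool. It assumes adb is installed, USB debugging is enabled on the phone, and the apk file name is only a placeholder for whatever file you downloaded.

```python
import subprocess

# Placeholder path to the downloaded file; not an official release.
apk_path = "./1-vs-100-mod.apk"

# Assumes adb (Android Debug Bridge) is installed and the phone is
# connected with USB debugging turned on.
subprocess.run(["adb", "devices"], check=True)            # confirm the phone is detected
subprocess.run(["adb", "install", apk_path], check=True)  # sideload the apk onto the phone
```

          Either way, the app shows up in your app drawer once the installation finishes.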

          -

          How to play 1 vs 100 mod apk?

          -

          Now that you have successfully downloaded and installed 1 vs 100 mod apk, you can start playing it and enjoy its features. Here are some tips on how to play it:

          -

          The gameplay and rules

          -

          The gameplay and rules of 1 vs 100 mod apk are similar to the TV show, but with some variations. You will start as the one and face off against a mob of 100 players online. You will be asked a series of multiple-choice questions and you have to answer them correctly within a time limit. For each question, you can use one of three lifelines: ask the audience, double dip, or trust the mob. The ask the audience lifeline will show you what percentage of the mob chose each answer. The double dip lifeline will allow you to choose two answers instead of one. The trust the mob lifeline will automatically select the most popular answer among the mob.

          -

          If you answer correctly, you will eliminate some of the mob members who answered incorrectly and increase your prize money. If you answer incorrectly, you will lose everything and the game will end. You can also choose to walk away at any time and keep your current prize money.

          -

          The features and modes

          -

          One of the advantages of using 1 vs 100 mod apk is that it offers more features and modes than the original game. Some of these are:

          -
            -
          • You can access unlimited coins and gems that you can use to buy more lifelines or customize your avatar.
          • -
          • You can remove all ads that may interrupt your gaming experience.
          • -
          • You can play in different modes, such as solo mode, where you can practice your trivia skills without competing with anyone; online mode, where you can join a live game with other players; and party mode, where you can create a private room and invite your friends or family to play with you.
          • -
          • You can choose from different categories of questions, such as general knowledge, entertainment, sports, history, geography, science, and more.
          • -
          • You can chat with other players and send them emojis or stickers.
          • -
          -

          The tips and tricks

          -

          To increase your chances of winning in 1 vs 100 mod apk, here are some tips and tricks that you can follow:

          -
            -
          • Study up on various topics and facts before playing. You never know what kind of questions you will encounter in the game.
          • -
          • Use your lifelines wisely. Don't waste them on easy questions or when you are confident about your answer. Save them for when you are unsure or stuck.
          • -
          • Eliminate as many mob members as possible in each question. The fewer mob members left, the easier it will be for you to win.
          • -
          • Don't be afraid to walk away if you feel that you are risking too much. Sometimes, it is better to quit while you are ahead than to lose everything.
          • -
          • Have fun and enjoy the game. Don't let the pressure or the competition get to you. Remember, it is just a game and the main goal is to have fun.
          • -
          -

          Conclusion

          -

          1 vs 100 mod apk is a fun and challenging quiz game that will test your knowledge and skills in various topics. It is based on the popular TV show of the same name, but with more features and modes that enhance your gaming experience. You can download and install it easily by following the steps we have provided in this article. However, you should also be aware of the risks and consequences of using a mod apk, as it may violate the terms and conditions of the original game or expose your device to malware or viruses. Therefore, you should always be careful and cautious when using a mod apk and play at your own risk.

          -

          We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

          -

          FAQs

          -

          Here are some of the frequently asked questions about 1 vs 100 mod apk:

          -

          Q: Is 1 vs 100 mod apk safe to use?

          -

          A: The answer to this question depends on the source of the mod apk. Some websites may offer safe and reliable mod apks, while others may contain harmful or fraudulent content. Therefore, you should always do your research and check the reviews and ratings of the website before downloading anything from it. You should also scan the file with an antivirus or malware detector before installing it on your device.

          -

          Q: Can I play 1 vs 100 mod apk offline?

          -

          A: No, you cannot play 1 vs 100 mod apk offline. The game requires an internet connection to function properly, as it involves playing with other players online. If you try to play it offline, you will not be able to access the game modes or features that require an online connection.

          -

          Q: How can I update 1 vs 100 mod apk?

          -

          A: To update 1 vs 100 mod apk, you will need to download and install the latest version of the mod apk from the same website where you got it. You should also delete the previous version of the mod apk from your device before installing the new one. However, you should note that updating the mod apk may cause some issues or errors with the game, as it may not be compatible with the original game's updates or changes.

          -

          Q: How can I contact the developer or publisher of 1 vs 100?

          -

          A: If you want to contact the developer or publisher of 1 vs 100, you can visit their official website or social media pages. You can also send them an email or a message through their contact form or support center. However, you should not mention anything about using a mod apk, as they may not approve or support it.

          -

          Q: How can I uninstall 1 vs 100 mod apk?

          -

          A: To uninstall 1 vs 100 mod apk, you can follow these steps:

          -
            -
          1. Go to your device's settings and look for the option that says "Apps" or "Applications".
          2. -
          3. Find and tap on "1 vs 100" from the list of apps installed on your device.
          4. -
          5. Tap on "Uninstall" or "Delete" and confirm your action.
          6. -
          7. The app will be removed from your device and you will see a message that says "App uninstalled".
          8. -

          401be4b1e0
          -
          -
          \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Nigerian Movies that Tell Real-Life Stories of Courage and Sacrifice.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Nigerian Movies that Tell Real-Life Stories of Courage and Sacrifice.md deleted file mode 100644 index be6c47eab9959bd479c2141fd3b3258f060d537d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Nigerian Movies that Tell Real-Life Stories of Courage and Sacrifice.md +++ /dev/null @@ -1,148 +0,0 @@ - -

          True Story Nigerian Movie Download: How to Watch the Best Nollywood Movies Online

          -

          Nollywood is the name given to the Nigerian film industry, which is one of the largest and most prolific in the world. Nollywood movies are known for their captivating stories, colorful characters, and cultural diversity. Some of the most popular genres of Nollywood movies are comedy, drama, romance, action, and thriller.

          -

          One of the most interesting aspects of Nollywood movies is that many of them are based on true stories. These movies depict the real-life experiences of ordinary Nigerians, as well as historical events, social issues, and cultural phenomena. They offer a glimpse into the rich and complex reality of Nigeria, as well as its challenges and opportunities.

          -

          true story nigerian movie download


          DOWNLOAD »»» https://ssurll.com/2uO0Lm



          -

          In this article, we will introduce you to one of the best Nollywood movies based on a true story: True Story Nigerian Movie. We will also show you how to download it for free, and recommend some other Nollywood movies based on true stories that you can watch online.

          -

          Introduction

          -

          What is True Story Nigerian Movie?

          -

          True Story Nigerian Movie is a 2011 comedy-drama film directed by Tchidi Chikere and starring Nkem Owoh, Okey Okonkwo, and Mercy Johnson. The movie tells the story of Papi (Nkem Owoh), a married man who falls in love with a strange housemaid (Mercy Johnson) after being driven out by his nagging pregnant wife. However, he soon discovers that his new lover is not what she seems, and that he has unwittingly invited trouble into his home.

          -

          Why You Should Watch True Story Nigerian Movie

          -

          True Story Nigerian Movie is a hilarious and entertaining movie that will keep you glued to your screen. The movie showcases the comedic talents of Nkem Owoh, who is one of the most famous and beloved actors in Nollywood. He delivers a brilliant performance as Papi, a hapless husband who gets himself into a series of funny and awkward situations. The movie also features Mercy Johnson, who is one of the most versatile and talented actresses in Nollywood. She plays the role of the mysterious housemaid who turns out to be more than meets the eye.

          -

          Besides being a comedy, True Story Nigerian Movie also has a serious message about marriage, fidelity, and trust. The movie explores the consequences of infidelity and deception, and how they can affect not only the individuals involved, but also their families and friends. The movie also shows how love can overcome challenges and difficulties, and how forgiveness and understanding can heal wounds.

          -

          How to Download True Story Nigerian Movie for Free

          -

          If you want to watch True Story Nigerian Movie online, you can find it on YouTube. However, if you want to download it for free, you will need to use a third-party website that offers free Nollywood movie downloads. One such website is [NaijaPals], which is a social networking site that also hosts thousands of Nollywood movies. To download True Story Nigerian Movie from NaijaPals, follow these steps:

          -
            -
          1. Go to [NaijaPals] and create an account or log in if you already have one.
          2. -
          3. Search for "True Story" in the search bar at the top of the page.
          4. -
          5. Select "True Story - Part 1" from the results and click on it.
          6. On the movie page, scroll down to the bottom and click on the "Download Movie" button.
          7. -
          8. Choose a download option from the pop-up window and click on it.
          9. -
          10. Wait for the download to complete and enjoy the movie.
          11. -
          -

          Repeat the same steps for "True Story - Part 2" to download the full movie.

          -

          The Best Nollywood Movies Based on True Stories

          -

          True Story Nigerian Movie is not the only Nollywood movie that is based on a true story. There are many other Nollywood movies that are inspired by real-life events and people. Here are some of the best Nollywood movies based on true stories that you can watch online:

          -

          true story nigerian movie free download
          -true story nigerian movie online streaming
          -true story nigerian movie cast and crew
          -true story nigerian movie review and rating
          -true story nigerian movie trailer and synopsis
          -true story nigerian movie based on real events
          -true story nigerian movie download mp4
          -true story nigerian movie download hd
          -true story nigerian movie download utorrent
          -true story nigerian movie download fzmovies
          -true story nigerian movie download netnaija
          -true story nigerian movie download 9jarocks
          -true story nigerian movie download ibakatv
          -true story nigerian movie download irokotv
          -true story nigerian movie download youtube
          -true story nigerian movie subtitles download
          -true story nigerian movie soundtrack download
          -true story nigerian movie theme song download
          -true story nigerian movie full length download
          -true story nigerian movie part 2 download
          -true story nigerian movie 2023 download
          -true story nigerian movie latest download
          -true story nigerian movie best download site
          -true story nigerian movie direct download link
          -true story nigerian movie torrent magnet download
          -true story nigerian movie watch online free
          -true story nigerian movie watch online hd
          -true story nigerian movie watch online 123movies
          -true story nigerian movie watch online netflix
          -true story nigerian movie watch online amazon prime
          -true story nigerian movie watch online hulu
          -true story nigerian movie watch online disney plus
          -true story nigerian movie watch online showmax
          -true story nigerian movie watch online dstv now
          -true story nigerian movie watch online youtube premium
          -true story nigerian movie release date and time
          -true story nigerian movie box office and budget
          -true story nigerian movie awards and nominations
          -true story nigerian movie behind the scenes and bloopers
          -true story nigerian movie interviews and press conference
          -true story nigerian movie genre and category
          -true story nigerian movie plot and summary
          -true story nigerian movie quotes and dialogues
          -true story nigerian movie scenes and clips
          -true story nigerian movie posters and images
          -true story nigerian movie facts and trivia
          -true story nigerian movie feedback and comments
          -true story nigerian movie recommendations and suggestions

          -

          (My Reflections) This Is Based On A True Life Story - African Movies

          -

          Plot Summary

          -

          This is a 2019 drama film directed by Ugezu J. Ugezu and starring Chinenye Uba, Jerry Williams, and Uju Okoli. The movie tells the story of a young woman named Ada (Chinenye Uba), who is married to a wealthy man named Frank (Jerry Williams). However, her marriage is not a happy one, as Frank is abusive, unfaithful, and controlling. Ada suffers in silence, until she meets a kind and handsome man named Tony (Uju Okoli), who offers her love and comfort. However, their affair soon leads to a tragic outcome that changes their lives forever.

          -

          Cast and Crew

          -

          The main cast of the movie are:

          -
            -
          • Chinenye Uba as Ada
          • -
          • Jerry Williams as Frank
          • -
          • Uju Okoli as Tony
          • -
          -

          The movie was directed by Ugezu J. Ugezu, who is also a screenwriter, actor, and producer. He has directed over 100 Nollywood movies, including The Return of King of Vultures, The Throne Is Mine, and The Sacred Cowry.

          -

          Where to Watch

          -

          You can watch (My Reflections) This Is Based On A True Life Story - African Movies on YouTube. The movie is divided into four parts, each about an hour long. You can also download the movie from [NaijaPals] by following the same steps as above.

          -

          True Story - Nollywood RealnollyTV

          -

          Plot Summary

          -

          This is a 2015 thriller film directed by Nonso Emekaekwue and starring Yul Edochie, Chika Ike, and Walter Anga. The movie tells the story of a young man named Dave (Yul Edochie), who is accused of killing his girlfriend Rita (Chika Ike) after she dumps him for another man. Dave claims that he is innocent, but no one believes him. He is arrested and sentenced to death by hanging. However, on the day of his execution, a shocking revelation comes to light that proves his innocence and exposes the real killer.

          -

          Cast and Crew

          -

          The main cast of the movie are:

          -
            -
          • Yul Edochie as Dave
          • -
          • Chika Ike as Rita
          • -
          • Walter Anga as Mike
          • -
          -

          The movie was directed by Nonso Emekaekwue, who is also an actor and producer. He has directed other Nollywood movies such as The Last Burial, The Evil Forest, and The Missing Princess.

          -

          Where to Watch

          -

          You can watch True Story - Nollywood RealnollyTV on YouTube. The movie is divided into two parts, each about an hour long. You can also download the movie from [NaijaPals] by following the same steps as above.

          -

          This True Life Story Of Mercy Johnson Will Make You Cry Real Tears-African Movies

          -

          Plot Summary

          -

          This is a 2018 drama film directed by Vincent D Anointed and starring Mercy Johnson, Ken Erics, and Chinyere Wilfred. The movie tells the true life story of Mercy Johnson (played by herself), who is one of the most successful and popular actresses in Nollywood. The movie chronicles her journey from poverty to fame, her struggles with family issues, her marriage to Prince Odianosen Okojie, and her faith in God.

          -

          Cast and Crew

          -

          The main cast of the movie are:

          -
            -
          • Mercy Johnson as herself
          • -
          • Ken Erics as Prince Odianosen Okojie
          • -
          • Chinyere Wilfred as Mercy Johnson's mother
          • -
          -

          The movie was directed by Vincent D Anointed, who is also a producer and actor. He has directed other Nollywood movies such as The Royal Wedding, The Billionaire's Daughter, and The King's Bride.

          -

          Where to Watch

          -

          You can watch This True Life Story Of Mercy Johnson Will Make You Cry Real Tears-African Movies on YouTube. The movie is divided into six parts, each about an hour long. You can also download the movie from [NaijaPals] by following the same steps as above.

          -

          Conclusion

          -

          Nollywood movies are not only entertaining, but also informative and inspiring. Many of them are based on true stories that reflect the reality of Nigeria and its people. True Story Nigerian Movie is one of the best examples of such movies, as it combines comedy, drama, and romance in a captivating way. You can download True Story Nigerian Movie for free from [NaijaPals], or watch it on YouTube. You can also check out some other Nollywood movies based on true stories that we have recommended in this article. We hope you enjoy watching these movies and learning more about Nollywood and Nigeria.

          -

          FAQs

          -

          Here are some frequently asked questions about True Story Nigerian Movie and Nollywood movies based on true stories:

          -
            -
          1. What is Nollywood?
          2. -

            Nollywood is the name given to the Nigerian film industry, which is one of the largest and most prolific in the world. Nollywood produces over 2000 movies per year, in various languages and genres. Nollywood movies are popular not only in Nigeria, but also across Africa and beyond.

            -
          3. What are the benefits of watching Nollywood movies?
          4. -

            Watching Nollywood movies can offer many benefits, such as:

            -
              -
            • Entertainment: Nollywood movies are fun and enjoyable to watch, as they have engaging stories, colorful characters, and humorous dialogues.
            • -
            • Education: Nollywood movies can teach you about the history, culture, and society of Nigeria and Africa, as well as various topics and issues that affect the continent and the world.
            • -
            • Inspiration: Nollywood movies can inspire you to pursue your dreams, overcome your challenges, and make a positive impact in your community and the world.
            • -
            -
          5. How can I find more Nollywood movies based on true stories?
          6. -

            There are many ways to find more Nollywood movies based on true stories, such as:

            -
              -
            • Searching online: You can use search engines like Google or Bing to look for Nollywood movies based on true stories. You can also use websites like [NaijaPals], [NollyLand], or [iROKOtv] that specialize in Nollywood movies.
            • -
            • Asking for recommendations: You can ask your friends, family, or colleagues who are familiar with Nollywood movies to recommend some of their favorite movies based on true stories. You can also join online forums or groups where Nollywood fans share their opinions and suggestions.
            • -
            • Watching documentaries: You can watch documentaries that feature the real-life stories behind some of the most popular and acclaimed Nollywood movies. Some examples of such documentaries are [Nollywood Babylon], [This Is Nollywood], and [The Wedding Party: Behind The Scenes].
            • -
            -
          7. How can I support Nollywood and its filmmakers?
          8. -

            You can support Nollywood and its filmmakers by:

            -
              -
            • Watching their movies legally: You can watch Nollywood movies legally by paying for subscription services like [iROKOtv], [Netflix], or [Amazon Prime Video] that offer a wide range of Nollywood movies. You can also buy or rent DVDs or Blu-rays of Nollywood movies from reputable sources.
            • -
            • Sharing their movies with others: You can share Nollywood movies with others by recommending them to your friends, family, or colleagues. You can also write reviews, ratings, or comments on social media, blogs, or websites that promote Nollywood movies.
            • -
            • Donating to their projects: You can donate to Nollywood filmmakers who are crowdfunding for their projects on platforms like [Kickstarter], [Indiegogo], or [GoFundMe]. You can also support organizations that fund or sponsor Nollywood filmmakers, such as [The African Film Foundation], [The Nigerian Film Corporation], or [The Lagos Film Academy].
            • -
            -
          9. What are some of the challenges that Nollywood faces?
          10. -

            Nollywood faces some of the following challenges:

            -
              -
            • Piracy: Piracy is a major problem that affects the revenue and quality of Nollywood movies. Pirated copies of Nollywood movies are often sold or distributed illegally online or offline, without the consent or compensation of the filmmakers. Piracy also reduces the incentive and resources for filmmakers to produce better and more original movies.
            • -
            • Censorship: Censorship is another issue that limits the creativity and expression of Nollywood filmmakers. Some Nollywood movies are banned or restricted by the government or other authorities for various reasons, such as political, religious, moral, or cultural sensitivities. Censorship also affects the distribution and accessibility of Nollywood movies in certain markets or regions.
            • -
            • Competition: Competition is another challenge that Nollywood faces from other film industries, especially Hollywood and Bollywood. These film industries have more resources, technology, and influence than Nollywood, and they often dominate the global film market. Competition also affects the preferences and expectations of the audience, who may compare Nollywood movies unfavorably to Hollywood or Bollywood movies.
            • -

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FHX Hay Day - Download for Android APK Free - Malavida.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FHX Hay Day - Download for Android APK Free - Malavida.md deleted file mode 100644 index ce634ce7f4f3f31d81460c2ac65886c5dcf67118..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/FHX Hay Day - Download for Android APK Free - Malavida.md +++ /dev/null @@ -1,72 +0,0 @@ - -

            How to Download FHX Hay Day: A Complete Guide

            -

            Hay Day is one of the most popular farming games on the App Store and Google Play. It lets you build your own farm, grow crops, raise animals, trade with neighbors, and explore the valley. Hay Day is fun, relaxing, and addictive, but it can also be challenging and frustrating if you run out of resources like coins, diamonds, or feed. That's why some players look for ways to hack or modify the game to get unlimited resources and enjoy the game without any limitations.

            -

            download fhx hay day


            DOWNLOAD 🗹 https://ssurll.com/2uNXs3



            -

            One of the most popular hacks for Hay Day is fhx hay day, a modified version of the game that gives you access to unlimited coins, diamonds, and other resources. With fhx hay day, you can buy anything you want, upgrade your buildings, expand your land, and complete tasks faster. You can also unlock all the animals, crops, and decorations that are otherwise unavailable in the official Hay Day app.

            -

            However, before you download fhx hay day, you should be aware of the risks involved in using a hacked version of the game. Fhx hay day is not authorized by Supercell, the developer of Hay Day, and it may contain viruses or malware that can harm your device or steal your personal information. Fhx hay day may also cause errors or crashes in the game, or make your account vulnerable to being banned by Supercell. Therefore, you should always use fhx hay day at your own risk and discretion, and follow some precautions to protect your device and account.

            -

            How to Download FHX Hay Day

            -

            If you still want to try fhx hay day, here are the steps you need to follow to download and install it on your device.

            -

            Step 1: Find a reliable source for fhx hay day apk file

            -

            Fhx hay day is not available on the App Store or Google Play, so you need to find a third-party website that offers the apk file for download. Apk files are Android application packages that can be installed on Android devices. However, not all websites that offer apk files are trustworthy, and some may contain fake or malicious files that can damage your device or compromise your security. Therefore, you should do some research before downloading any apk file from an unknown source.

            -

            One way to find a reliable source for fhx hay day apk file is to search for reviews or feedback from other users who have downloaded it. You can also use a website scanner tool like VirusTotal or Norton Safe Web to check if the website is safe or not. Alternatively, you can ask for recommendations from other players who have used fhx hay day successfully.
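            If you also have access to a computer, one extra precaution is to fingerprint the downloaded file before installing it. The sketch below is a minimal example in Python; the file name is only a placeholder, and the resulting hash can be searched on a scanner such as VirusTotal to see whether the exact same file has already been flagged by others.

```python
import hashlib

# Placeholder name for whatever file was downloaded.
apk_path = "./fhx-hay-day.apk"

# Hash the file in chunks so large downloads do not need to fit in memory.
sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print(sha256.hexdigest())  # look this value up on VirusTotal or a similar scanner
```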

            -

            Step 2: Enable unknown sources on your device

            -

            By default, Android devices do not allow installation of apps from unknown sources, which means sources other than the Google Play Store. This is a security feature that prevents unauthorized or harmful apps from being installed on your device. However, if you want to install fhx hay day apk file, you need to enable unknown sources on your device first.

            -

            download fhx hay day mod apk
            -download fhx hay day unlimited coins and diamonds
            -download fhx hay day latest version
            -download fhx hay day for android
            -download fhx hay day for pc
            -download fhx hay day offline
            -download fhx hay day hack tool
            -download fhx hay day private server
            -download fhx hay day cheats
            -download fhx hay day free gems
            -download fhx hay day update
            -download fhx hay day online
            -download fhx hay day game
            -download fhx hay day 2023
            -download fhx hay day new farm
            -download fhx hay day apk pure
            -download fhx hay day no root
            -download fhx hay day simulator
            -download fhx hay day original
            -download fhx hay day mod menu
            -download fhx hay day with obb file
            -download fhx hay day full version
            -download fhx hay day for ios
            -download fhx hay day for windows 10
            -download fhx hay day without verification
            -download fhx hay day generator
            -download fhx hay day mega mod
            -download fhx hay day supercell
            -download fhx hay day tips and tricks
            -download fhx hay day pro apk

            -

            To enable unknown sources on your device, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that installing apps from unknown sources may harm your device or data. Tap OK to confirm that you want to proceed with the installation.

            -

            Step 3: Download and install fhx hay day apk file

            -

            Once you have enabled unknown sources on your device, you can download fhx hay day apk file from the website you have chosen. To download the file, tap on the download link or button and wait for the download to complete. You may see a notification that the file is ready to be installed. Tap on the notification or go to your device's file manager and locate the downloaded file. Tap on the file and follow the instructions to install it on your device. You may need to grant some permissions to the app to access your device's features or data.

            -

            Step 4: Launch fhx hay day and enjoy unlimited resources

            -

            After installing fhx hay day apk file, you can launch the app from your device's app drawer or home screen. You may see a splash screen or a loading screen before the app opens. Once the app opens, you can sign in with your Google Play account or create a new account. You will then see your farm with unlimited coins, diamonds, and other resources. You can use them to buy anything you want, upgrade your buildings, expand your land, and complete tasks faster. You can also unlock all the animals, crops, and decorations that are otherwise unavailable in the official Hay Day app.

            -

            Conclusion

            -

            Fhx hay day is a modified version of Hay Day that gives you access to unlimited resources and features. It can make the game more fun and easy, but it also comes with some risks and drawbacks. Fhx hay day is not authorized by Supercell, the developer of Hay Day, and it may contain viruses or malware that can harm your device or steal your personal information. Fhx hay day may also cause errors or crashes in the game, or make your account vulnerable to being banned by Supercell.

            -

            Therefore, you should always use fhx hay day at your own risk and discretion, and follow some precautions to protect your device and account. Here are some tips and warnings for using fhx hay day:

            -
              -
            • Do not use fhx hay day on your main account or device. Use a secondary account or device that you do not care about losing.
            • -
            • Do not use fhx hay day to play with other players who use the official Hay Day app. You may get reported or detected by Supercell and get banned from the game.
            • -
            • Do not use fhx hay day to make purchases or transactions with real money. You may get scammed or lose your money.
            • -
            • Do not use fhx hay day for a long time or frequently. You may get bored or lose interest in the game.
            • -
            • Do not use fhx hay day without antivirus or firewall protection on your device. You may get infected by viruses or malware that can damage your device or compromise your security.
            • -
            -

            We hope this article has helped you learn how to download fhx hay day and enjoy unlimited resources in Hay Day. If you have any feedback or questions, please feel free to leave a comment below. We would love to hear from you!

            -

            FAQs

            -

            Q1: Is fhx hay day safe to use?

            -

            A1: Fhx hay day is not safe to use because it is not authorized by Supercell, the developer of Hay Day, and it may contain viruses or malware that can harm your device or steal your personal information. Fhx hay day may also cause errors or crashes in the game, or make your account vulnerable to being banned by Supercell.

            -

            Q2: Do I need to root my device to use fhx hay day?

            -

            A2: No, you do not need to root your device to use fhx hay day. Fhx hay day is an apk file that can be installed on any Android device without rooting.

            -

            Q3: Will I get banned from Hay Day if I use fhx hay day?

            -

            A3: Yes, you may get banned from Hay Day if you use fhx hay day. Fhx hay day is a hacked version of the game that violates the terms of service of Supercell. Supercell has the right to ban any account that uses hacks or cheats in their games.

            -

            Q4: Can I play with other players who use the official Hay Day app?

            -

            A4: No, you cannot play with other players who use the official Hay Day app if you use fhx hay day. Fhx hay day is a separate version of the game that runs on a different server than the official Hay Day app. You can only play with other players who use fhx hay day or other modified versions of Hay Day. Playing with other players who use the official Hay Day app may result in being reported or detected by Supercell and getting banned from the game.

            -

            Q5: How can I update fhx hay day to the latest version?

            -

            A5: To update fhx hay day to the latest version, you need to download and install the new apk file from the same source you used before. You may need to uninstall the old version of fhx hay day first before installing the new one. However, updating fhx hay day may not be possible or compatible with the official Hay Day app, and it may cause errors or crashes in the game. Therefore, you should always check the compatibility and reliability of the new version before updating fhx hay day.

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/summary/randeng_t5_784M_summary.sh b/spaces/skf15963/summary/fengshen/examples/summary/randeng_t5_784M_summary.sh deleted file mode 100644 index 5b3e60c8784ac563eff09763591e00b6d250444f..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/summary/randeng_t5_784M_summary.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=randeng_t5_77M_summary -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=2 -#SBATCH --gres=gpu:2 # number of gpus -#SBATCH --cpus-per-task=30 -#SBATCH -o %x-%j.log - -set -x -e - -echo "START TIME: $(date)" -MODEL_NAME=randeng_t5_784M_summary -MICRO_BATCH_SIZE=8 -ROOT_DIR=/cognitive_comp/dongxiaoqun/finetune/${MODEL_NAME} -if [ ! -d ${ROOT_DIR} ];then - mkdir ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -ZERO_STAGE=1 - -config_json="${ROOT_DIR}/ds_config.${MODEL_NAME}.json" - -# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() -cat < $config_json -{ - "train_micro_batch_size_per_gpu": ${MICRO_BATCH_SIZE}, - "steps_per_print": 100, - "gradient_clipping": 1.0, - "zero_optimization": { - "stage": $ZERO_STAGE, - "contiguous_gradients": false, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 50000000, - "allgather_bucket_size": 500000000 - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-4, - "weight_decay": 1e-2 - } - }, - "scheduler": { - "params": { - "warmup_max_lr": 1e-04, - "warmup_min_lr": 1e-05, - "total_num_steps": 60000, - "warmup_num_steps" : 500 - }, - "type": "WarmupDecayLR" - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": true, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json -export TORCH_EXTENSIONS_DIR=/cognitive_comp/dongxiaoqun/torch_extendsions -# export MASTER_PORT=$[RANDOM%10000+30000] -# export PL_FAULT_TOLERANT_TRAINING=1 - -TRAINER_ARGS=" - --max_epochs 1 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy deepspeed_stage_${ZERO_STAGE} \ - --default_root_dir $ROOT_DIR \ - --dirpath $ROOT_DIR/ckpt \ - --save_top_k 3 \ - --monitor val_loss \ - --mode min \ - --save_last \ - --every_n_train_steps 0 \ - --val_check_interval 0.1 \ -" - -prompt="summary:" -DATA_ARGS=" - --datasets_name lcsts \ - --num_workers 30 \ - --train_batchsize $MICRO_BATCH_SIZE \ - --val_batchsize $MICRO_BATCH_SIZE \ - --test_batchsize $MICRO_BATCH_SIZE \ - --max_enc_length 128 \ - --max_dec_length 64 \ - --val_datasets_field val \ - --prompt $prompt \ -" -# --prompt $prompt \ -MODEL_ARGS=" - --pretrained_model_path /cognitive_comp/ganruyi/experiments/randeng_t5_large_v2/ckpt/hf_pretrained_epoch0_step732500 \ - --output_save_path $ROOT_DIR/randeng_t5_784M_predict_lcsts.json \ -" - -SCRIPTS_PATH=/cognitive_comp/dongxiaoqun/debug/Fengshenbang-LM/fengshen/examples/summary/seq2seq_summary.py -SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif - -export CMD=" \ - $SCRIPTS_PATH \ - $TRAINER_ARGS \ - $MODEL_ARGS \ - $DATA_ARGS \ - " -echo $CMD - -source activate -conda activate torchnew -srun --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=30 -o ${MODEL_NAME}-%J.log --jobid=229668 bash -c 'python3 $SCRIPT_PATH $CMD' -# source 
activate base -# python $CMD - -# srun --jobid=229668 --nodes=1 --gres=gpu:1 --ntasks-per-node=1 --cpus-per-task=30 -e ${ROOT_DIR}/${MODEL_NAME}-%j.err -o ${ROOT_DIR}/${MODEL_NAME}-%j.log singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c '/home/ganruyi/anaconda3/bin/python $CMD' - -# srun python $CMD -# srun singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c '/home/ganruyi/anaconda3/bin/python $CMD' diff --git a/spaces/skoneru/contextual_refinement_ende/README.md b/spaces/skoneru/contextual_refinement_ende/README.md deleted file mode 100644 index c48d10b4dd4c0c3d8b97a44b3c8a7ecf6d7992b6..0000000000000000000000000000000000000000 --- a/spaces/skoneru/contextual_refinement_ende/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Contextual Refinement En ➡️ De -emoji: 🦙 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/quant_noise/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/quant_noise/README.md deleted file mode 100644 index a04d7e4e8a077f11c9f63cfa3d1f20e2b899be8c..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/quant_noise/README.md +++ /dev/null @@ -1,298 +0,0 @@ -# Training with Quantization Noise for Extreme Model Compression ({Fan\*, Stock\*} *et al.*, 2020) -This page contains information for how to train and quantize models with Quantization Noise, for both scalar quantization like `int8` and Iterative Product Quantization. -Check out our paper [here](https://arxiv.org/abs/2004.07320). - -Looking for pretrained models? They will be added shortly. -Looking for code to train vision models? We are working on open sourcing our code as part of ClassyVision. Please check back, but note that both the Scalar and Iterative Product Quantization counterparts of the `nn.Conv2d` module are already included in this release. - -**Contents**: -- [Walk through of code](#walk-through-the-code) -- [Reproduce NLP Results](#looking-to-reproduce-the-nlp-results-in-the-paper) -- [Reproduce Vision Results](#looking-to-reproduce-the-vision-results-in-the-paper) - - -## Citation -```bibtex -@article{fan2020training, - title={Training with Quantization Noise for Extreme Model Compression}, - author={Angela Fan* and Pierre Stock* and and Benjamin Graham and Edouard Grave and Remi Gribonval and Herve Jegou and Armand Joulin}, - year={2020}, - eprint={2004.07320}, - archivePrefix={arXiv}, - primaryClass={cs.ML} -} -``` - -## Walk through the code - -Training a model with Quant-Noise improves the performance in subsequent inference-time quantization by training models to be robust to quantization. This technique is useful for both scalar and product quantization methods, as well as multiple domains. We detail below our approach to train, quantize models and integrate our code to quantize your favorite models. - -### Scalar Quantization - -Unlike the section [Iterative Product Quantization](#iterative-product-quantization) which gives state-of-the-art compression, this section showcases the usefulness of our approach for simple scalar quantization baselines such as int8 using on-GPU Fake Quantization. 
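As a rough illustration of what fake (simulated) quantization means here (a toy sketch, not the library's actual implementation), the function below rounds a tensor to an int8 grid and immediately de-quantizes it, so the rest of the network keeps running in floating point and only the rounding error is introduced:

```python
import torch

def fake_quantize_int8(x: torch.Tensor) -> torch.Tensor:
    # Map to a symmetric 8-bit grid, then back to floats. Downstream ops
    # are unchanged; only the quantization error ("noise") remains.
    scale = x.abs().max().clamp(min=1e-8) / 127.0
    q = torch.clamp(torch.round(x / scale), -128, 127)
    return q * scale
```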
- -#### Training - -Scalar quantization with Quant-Noise consists in randomly quantizing a proportion `p` of the weights during training. Scalar quantization is implemented [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quantization/scalar) under the form of Fake Quantization, meaning that we emulate int8 on GPU by quantizing and de-quantizing both the weights and the activations. We rely on PyTorch's [quantization primitives](https://github.com/pytorch/pytorch/tree/master/torch/quantization). - -To train a model with Quant-Noise, add the following flag: -``` ---quant-noise-scalar 0.5 -``` -Large values of noise make the network easier to quantize but may result in higher non-quantized test and validation perplexities. - -#### Quantization - -When evaluating a network, all quantized modules and activation hooks automatically switch to `p=1` so the validation accuracy reported by Fairseq is actually the quantized one, nothing more to do. - - -#### Integration with your own code - -Looking to quantize your own models with Quant-Noise + Scalar Quantization? -- Use the function `quantize_model_` implemented [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quantization/scalar/utils.py) to (1) replace all your modules by their quantized counterparts and (2) add hooks to those modules to quantize the activations. -- Then, perform your training as usual. Note that in `eval()` mode, the network is always fully quantized (weights and activations) by default (`p=1`). - - - -### Iterative Product Quantization - - -Iterative Product Quantization with Quant-Noise proceeds in two steps. First, a model must be trained uncompressed with Quant-Noise. Second, the model must be quantized with iPQ. Note that we implement here the simplest form of noise, which consists in randomly dropping a proportion `p` of blocks, and that worked as well as assigning those blocks to their current centroid. - -#### Training - -To train a model with Quant-Noise, add the following flags: -``` ---quant-noise-pq 0.1 --quant-noise-pq-block-size 8 -``` -`quant-noise-pq` controls how much dropout is applied to the blocks of the weight matrix. `quant-noise-pq-block-size` controls the size of the weight matrix blocks. -We recommend training with 0.05 to 0.2 Quant-Noise, a value that worked well in our experiments. For the block-size, we recommend training with block-size of 8. Note that the block size must be a multiple of `input_features`, see the size checks [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quant_noise.py). Large block sizes result in higher compression ratio but may induce a loss in accuracy. - -We currently support training Transformer based models, such as sequence-to-sequence, language models, and BERT architectures. The `quant_noise` function [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quant_noise.py) wraps a module. It splits a weight matrix into blocks and applies random dropout to these blocks. -In the Transformer architectures, quant-noise is applied to the input and output embeddings, the attention, and the FFN. - -Quant-Noise can also be combined with **LayerDrop** (see [here](https://github.com/pytorch/fairseq/tree/main/examples/layerdrop)) to add its pruning effect to the quantized model and make the model even smaller. We recommend training with LayerDrop 0.1 or 0.2. 
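To make the wrapping concrete, here is a minimal sketch of applying the `quant_noise` function to a single linear layer. The layer sizes and noise parameters are illustrative, not recommended values for any particular model:

```python
import torch.nn as nn
from fairseq.modules.quant_noise import quant_noise

# Illustrative sizes; the block size must divide the layer's input dimension.
in_features, out_features = 512, 2048

ffn = quant_noise(
    nn.Linear(in_features, out_features),
    p=0.1,         # proportion of weight blocks affected by Quant-Noise
    block_size=8,  # size of the blocks dropped during training
)
# In eval() mode the wrapper leaves the weights untouched, so inference
# behaves like a plain nn.Linear.
```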
- -#### Quantization - -We implement an improved version of product quantization from Stock et al, **iPQ**, described [here](https://arxiv.org/abs/1907.05686), see code with old API [here](https://github.com/facebookresearch/kill-the-bits). Note that we improved the iPQ API in terms of both compute speed and usability as described below. - -For the particular case of PQ, quantization is made sequentially. We recommend first quantizing the FFNs, then the EMBs, and finally the ATTNs. Quantization is done in two sub-steps: -- First, perform `n` steps of Product Quantization (generally `n=20` is enough). -- Then, finetune the obtained centroids. - -#### Integration with your own code - -Looking to quantize your own models with Quant-Noise + iPQ? -- First wrap your modules with the `quant_noise` function [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quant_noise.py), which is module-agnostic and train your favorite model. -- Then, quantize your trained model using the code [here](https://github.com/pytorch/fairseq/tree/main/fairseq/modules/quantization/pq). This can be done *without any changes to your training loop*. Below is an example code for integration. -Note that we tried our approach only on Transformers and various Convolutional Models such as EfficientNets. - -```python -from fairseq.modules.quantization.pq import quantize_model_, SizeTracker - -# get configuration parameters -n_centroids_config = config["n_centroids"] -block_sizes_config = config["block_sizes"] -layers_to_quantize = config["layers_to_quantize"] - -# size tracker for keeping track of assignments, centroids and non-compressed sizes -size_tracker = SizeTracker(model) - -# Quantize model by stages -for step in range(len(layers_to_quantize)): - - # quantize model in-place - quantized_layers = quantize_model_( - model, - size_tracker, - layers_to_quantize, - block_sizes_config, - n_centroids_config, - step=step, - ) - logger.info(f"Finetuning stage {step}, quantized layers: {quantized_layers}") - logger.info(f"{size_tracker}") - - # Don't forget to re-create/update trainer/optimizer since model parameters have changed - optimizer = ... - - # Finetune the centroids with your usual training loop for a few epochs - trainer.train_epoch() -``` - - -## Looking to reproduce the NLP results in the paper? - -We detail below how to reproduce the state-of-the-art results in reported in the paper for Quant-Noise + Iterative Product Quantization. - -### Training with Quant-Noise - -To **train** RoBERTa + QuantNoise, we followed this setting [here](https://github.com/pytorch/fairseq/tree/main/examples/roberta). 
-The following command can be used to train a RoBERTa Base + QuantNoise model: - -```bash -TOTAL_UPDATES=125000 -WARMUP_UPDATES=10000 -PEAK_LR=0.0005 -TOKENS_PER_SAMPLE=512 -MAX_POSITIONS=512 -MAX_SENTENCES=16 -UPDATE_FREQ=2 -DATA_DIR=/path/to/data/here - -fairseq-train $DATA_DIR \ - --task masked_lm --criterion masked_lm --arch roberta_base \ - --sample-break-mode complete \ - --tokens-per-sample $TOKENS_PER_SAMPLE --max-positions $MAX_POSITIONS \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-6 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $PEAK_LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_UPDATES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.01 \ - --batch-size $MAX_SENTENCES \ - --update-freq $UPDATE_FREQ --max-update $TOTAL_UPDATES \ - --save-dir checkpoint/roberta \ - --ddp-backend legacy_ddp --encoder-layerdrop 0.2 \ - --quant-noise-pq 0.2 --quant-noise-pq-block-size 8 --untie-weights-roberta -``` - -To **finetune** RoBERTa + QuantNoise, we followed this setting [here](https://github.com/pytorch/fairseq/blob/main/examples/roberta/README.glue.md). -The following command can be used to finetune a RoBERTa Base + QuantNoise model on the RTE dataset: - -```bash -TOTAL_NUM_UPDATES=2036 -WARMUP_UPDATES=122 -LR=2e-05 -NUM_CLASSES=2 -MAX_SENTENCES=16 -ROBERTA_PATH=/path/to/roberta_quantnoise/model.pt - -fairseq-train /path/to/rte/data/ \ - --restore-file $ROBERTA_PATH \ - --max-positions 512 \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --task sentence_prediction \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --init-token 0 --separator-token 2 \ - --arch roberta_large \ - --criterion sentence_prediction \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --max-epoch 10 \ - --find-unused-parameters \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --ddp-backend legacy_ddp \ - --quant-noise-pq 0.2 --quant-noise-pq-block-size 8 -``` - -To **train** Language Models on Wikitext-103, we followed this setting [here](https://github.com/pytorch/fairseq/tree/main/examples/language_model). 
-The following command can be used to train a Transformer + QuantNoise model on Wikitext-103: - -```bash -fairseq-train --task language_modeling /path/to/wikitext-103/data \ - --save-dir checkpoints/transformer_wikitext-103 \ - --adaptive-input --adaptive-input-cutoff 20000,60000 --adaptive-input-factor 4 \ - --adaptive-softmax-cutoff 20000,60000 --adaptive-softmax-dropout 0.2 --adaptive-softmax-factor 4.0 \ - --tie-adaptive-proj --tie-adaptive-weights \ - --arch transformer_lm_gbw \ - --attention-dropout 0.1 --dropout 0.2 --relu-dropout 0.1 \ - --clip-norm 0.1 --criterion adaptive_loss \ - --ddp-backend legacy_ddp \ - --decoder-attention-heads 8 --decoder-embed-dim 1024 --decoder-ffn-embed-dim 4096 --decoder-input-dim 1024 \ - --decoder-layers 16 --decoder-normalize-before --decoder-output-dim 1024 \ - --min-lr 0.0001 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 --lr 1.0 --t-mult 2.0 \ - --max-tokens 3072 --tokens-per-sample 3072 --momentum 0.99 --optimizer nag \ - --sample-break-mode none --update-freq 3 \ - --warmup-init-lr 1e-07 --warmup-updates 16000 \ - --weight-decay 0 --seed 1 --stop-min-lr 1e-09 \ - --quant-noise-pq 0.05 --quant-noise-pq-block-size 8 -``` - -To **evaluate** this model, note you need to use the `eval.py` script. The following command can be used to evaluate: - -```bash -fairseq-eval-lm /path/to/wikitext-103/data --path /path/to/model/checkpoint \ - --sample-break-mode complete \ - --max-tokens 3072 \ - --context-window 2560 \ - --softmax-batch 1024 \ - --gen-subset valid -``` -and change the `--gen-subset` to `test` if you would like to evaluate on the test set instead. - - -### Iterative Product Quantization - -To quantize the finetuned RoBERTa model, we use this command on 1 GPU. This should run in a day. -```bash -TOTAL_NUM_UPDATES=6108 # 2036 updates for each iteration -WARMUP_UPDATES=122 -LR=2e-05 -NUM_CLASSES=2 -MAX_SENTENCES=16 -fairseq-train --task sentence_prediction /path/to/data/ \ - --restore-file $ROBERTA_PATH \ - --save-dir checkpoints/roberta_finetuned \ - --max-positions 512 \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --init-token 0 --separator-token 2 \ - --arch roberta_large \ - --criterion sentence_prediction \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \ - --clip-norm 0.0 --lr-scheduler polynomial_decay \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --no-progress-bar --skip-invalid-size-inputs-valid-test --ddp-backend legacy_ddp \ - --quantization-config-path /path/to/config/yaml -``` - -To quantize the trained Language Model, we use this command on 8 V100 23GB GPUs. This should run in a couple of hours. 
-```bash -fairseq-train --task language_modeling /path/to/wikitext-103/data \ - --save-dir checkpoints/transformer_wikitext-103 \ - --adaptive-input --adaptive-input-cutoff 20000,60000 --adaptive-input-factor 4 \ - --adaptive-softmax-cutoff 20000,60000 --adaptive-softmax-dropout 0.2 --adaptive-softmax-factor 4.0 \ - --arch transformer_lm_gbw \ - --attention-dropout 0.1 --dropout 0.2 --relu-dropout 0.1 \ - --bucket-cap-mb 25 --char-embedder-highway-layers 2 --character-embedding-dim 4 \ - --clip-norm 0.1 --criterion adaptive_loss \ - --ddp-backend legacy_ddp \ - --decoder-attention-heads 8 --decoder-embed-dim 1024 --decoder-ffn-embed-dim 4096 --decoder-input-dim 1024 --decoder-layers 16 --decoder-normalize-before --decoder-output-dim 1024 \ - --fp16 --keep-last-epochs -1 \ - --min-lr 0.0001 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 --lr 0.05 --stop-min-lr 1e-09 \ - --max-tokens 2944 --tokens-per-sample 2944\ - --momentum 0.99 --no-epoch-checkpoints --no-progress-bar --optimizer nag --required-batch-size-multiple 8 \ - --sample-break-mode none --t-mult 2.0 --skip-invalid-size-inputs-valid-test \ - --tie-adaptive-proj --tie-adaptive-weights --update-freq 3 --weight-decay 0 --seed 1 \ - --log-interval 100 --no-progress-bar --skip-invalid-size-inputs-valid-test \ - --restore-file path/to/trained/lm/with/quant/noise \ - --max-update 13500 --quantization-config-path /path/to/config/yaml -``` -If you have less capacity or if your distributed training freezes, try reducing `--max-tokens` and `--tokens-per-sample` (this may reduce the quantized accuracy a bit). - -### Remarks - -We try to keep the open-sourced code as readable and as easy-to-plug as possible. Therefore, we did not test it for the following cases: -- Scalar quantization with RoBERTa. -- Quantization with iPQ and `int8` combined. - -If you have trouble adapting it, we will be more than happy to help! - -## Looking to reproduce the Vision results in the paper? - -We are working on open sourcing our code as part of ClassyVision. Please check back. - - -## Having an issue or have a question? - -Please open an issue in this repository with the details of your question. Thanks! diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py deleted file mode 100644 index 7ec1fb7521b8a9b821d28bcaaaedb034f6e95e0b..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -from pathlib import Path -from collections import defaultdict - -import pandas as pd -from torchaudio.datasets import LJSPEECH -from tqdm import tqdm - -from examples.speech_to_text.data_utils import save_df_to_tsv - - -log = logging.getLogger(__name__) - -SPLITS = ["train", "dev", "test"] - - -def process(args): - out_root = Path(args.output_data_root).absolute() - out_root.mkdir(parents=True, exist_ok=True) - - # Generate TSV manifest - print("Generating manifest...") - # following FastSpeech's splits - dataset = LJSPEECH(out_root.as_posix(), download=True) - id_to_split = {} - for x in dataset._flist: - id_ = x[0] - speaker = id_.split("-")[0] - id_to_split[id_] = { - "LJ001": "test", "LJ002": "test", "LJ003": "dev" - }.get(speaker, "train") - manifest_by_split = {split: defaultdict(list) for split in SPLITS} - progress = tqdm(enumerate(dataset), total=len(dataset)) - for i, (waveform, _, utt, normalized_utt) in progress: - sample_id = dataset._flist[i][0] - split = id_to_split[sample_id] - manifest_by_split[split]["id"].append(sample_id) - audio_path = f"{dataset._path}/{sample_id}.wav" - manifest_by_split[split]["audio"].append(audio_path) - manifest_by_split[split]["n_frames"].append(len(waveform[0])) - manifest_by_split[split]["tgt_text"].append(normalized_utt) - manifest_by_split[split]["speaker"].append("ljspeech") - manifest_by_split[split]["src_text"].append(utt) - - manifest_root = Path(args.output_manifest_root).absolute() - manifest_root.mkdir(parents=True, exist_ok=True) - for split in SPLITS: - save_df_to_tsv( - pd.DataFrame.from_dict(manifest_by_split[split]), - manifest_root / f"{split}.audio.tsv" - ) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--output-data-root", "-d", required=True, type=str) - parser.add_argument("--output-manifest-root", "-m", required=True, type=str) - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/concat_sentences_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/concat_sentences_dataset.py deleted file mode 100644 index 625a29370e90f9d1d7274024afb902ed83a22325..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/concat_sentences_dataset.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . 
import FairseqDataset - - -class ConcatSentencesDataset(FairseqDataset): - def __init__(self, *datasets): - super().__init__() - self.datasets = datasets - assert all( - len(ds) == len(datasets[0]) for ds in datasets - ), "datasets must have the same length" - - def __getitem__(self, index): - return torch.cat([ds[index] for ds in self.datasets]) - - def __len__(self): - return len(self.datasets[0]) - - def collater(self, samples): - return self.datasets[0].collater(samples) - - @property - def sizes(self): - return sum(ds.sizes for ds in self.datasets) - - def num_tokens(self, index): - return sum(ds.num_tokens(index) for ds in self.datasets) - - def size(self, index): - return sum(ds.size(index) for ds in self.datasets) - - def ordered_indices(self): - return self.datasets[0].ordered_indices() - - @property - def supports_prefetch(self): - return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets) - - def prefetch(self, indices): - for ds in self.datasets: - if getattr(ds, "supports_prefetch", False): - ds.prefetch(indices) - - def set_epoch(self, epoch): - super().set_epoch(epoch) - for ds in self.datasets: - if hasattr(ds, "set_epoch"): - ds.set_epoch(epoch) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Abdullah Abu Sayeed Book [CRACKED] Download.md b/spaces/stomexserde/gpt4-ui/Examples/Abdullah Abu Sayeed Book [CRACKED] Download.md deleted file mode 100644 index 5b8e0bdd3a495057e8466ff676f38a8e15d1c534..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Abdullah Abu Sayeed Book [CRACKED] Download.md +++ /dev/null @@ -1,36 +0,0 @@ -
            -

            How to Download Books by Abdullah Abu Sayeed, a Renowned Bangladeshi Writer and Activist

            - -

            Abdullah Abu Sayeed is a Bangladeshi writer, television presenter, organizer and activist who is best known for his essays, memoirs and books on the liberation war of Bangladesh. He is also the founder and chairman of Bishwa Sahitya Kendra, a non-profit organization that promotes the study of literature, reading habits and progressive ideas among the youth.

            - -

            If you are interested in reading his books, you might be wondering how to download them for free. In this article, we will show you some of the best websites where you can find and download books by Abdullah Abu Sayeed in PDF format.

            -

            abdullah abu sayeed book download


            Download File »»» https://urlgoal.com/2uI9Jd



            - -

            All Abdullah Abu Sayeed Books PDF - Bangla eBook Download

            - -

            One of the most comprehensive websites that offers all Abdullah Abu Sayeed books PDF for free is Bangla eBook Download. This website has a dedicated page for Abdullah Abu Sayeed where you can find his biography, genres, followers and total books. You can also sort his books by popularity, recency or alphabetically.

            - -

            Some of the most popular books by Abdullah Abu Sayeed that you can download from this website are:

            - - - -

            To download any book from this website, you just need to click on the book title and then click on the download button. You will be redirected to a page where you can choose your preferred file format (PDF or EPUB) and then download the book to your device.

            - -

            Abdullah Abu Sayeed (Author of সংগঠন ও বাঙালি) - Goodreads

            - -

            Another website where you can find and download books by Abdullah Abu Sayeed is Goodreads. Goodreads is a popular social media platform for book lovers where you can discover new books, read reviews, join groups and follow your favorite authors.

            - -

            Abdullah Abu Sayeed has an author page on Goodreads where you can see his profile, ratings, reviews and books. You can also follow him to get updates on his new releases and activities. Some of his books that are available on Goodreads are:

            -

            - -
              -
            • ভাঙো দুর্দশার চক্র (The Broken Cycle of Misfortune)
            • -
            • https://urlgoal.com/2uI75h



              -

              One of the most popular websites that offer cracks for various software is Chomikuj.pl. Chomikuj.pl is a Polish file-sharing platform that hosts millions of files uploaded by its users. You can find cracks for Cad Decor 2.0 on Chomikuj.pl by typing "Cad Decor 2.0 Chomikuj Crack" in the search box.

              -

              However, downloading and installing a crack from Chomikuj.pl or any other website is not recommended for several reasons:

              -
                -
              • It is illegal. Using a cracked software without a license is a violation of the intellectual property rights of the software developer. You could face legal consequences if you are caught.
              • -
              • It is unsafe. Cracks often contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. You could expose yourself to cyberattacks or identity theft if you download and install a crack.
              • -
              • It is unreliable. Cracks often have bugs, errors, glitches, or compatibility issues that can affect the performance and functionality of the software. You could lose your work or compromise its quality if you use a crack.
              • -
              -

              Therefore, the best way to download and install Cad Decor 2.0 is to buy a license from the official website of Cad Design Pro (https://cad-design-pro.com/). By doing so, you will get access to the latest version of the software, regular updates, technical support, and customer service. You will also support the development of this amazing software and help it grow.

              -

              If you want to try Cad Decor 2.0 before buying it, you can request a free trial from the website as well. You will be able to use the software for 30 days without any limitations or obligations.

              -

              -

              Cad Decor 2.0 is a great software for anyone who wants to create stunning 3D models of any space or object. It is easy to use, fast, and versatile. It has many features and options that allow you to customize your designs according to your preferences and needs.

              -

              If you are interested in learning more about Cad Decor 2.0, you can visit the website of Cad Design Pro (https://cad-design-pro.com/) or check out some of the tutorials and reviews available online.

              -

              Don't waste your time and money on cracks that can harm your computer and your work. Buy Cad Decor 2.0 today and enjoy the best 3D modelling software for Windows!

              81aa517590
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Como Baixar Sketchup Crackeado.md b/spaces/stomexserde/gpt4-ui/Examples/Como Baixar Sketchup Crackeado.md deleted file mode 100644 index c280aced50fb62acac4695b8cc7202fb73981940..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Como Baixar Sketchup Crackeado.md +++ /dev/null @@ -1,21 +0,0 @@ - -

              Como Baixar Sketchup Crackeado e Ativado para 2022

              -

              O Sketchup é um dos softwares mais populares e versáteis para modelagem 3D e CAD, usado por profissionais e amadores de diversas áreas, como arquitetura, design, engenharia, animação e muito mais. Mas como baixar o Sketchup crackeado e ativado para 2022, sem precisar pagar pela licença oficial?

              -

              Como Baixar Sketchup Crackeado


              Download ✦✦✦ https://urlgoal.com/2uI9wS



              -

              Neste artigo, vamos mostrar como você pode fazer o download do Sketchup Pro 2022 + Ativação permanente + SketchUP sem precisar instalar já ativado, usando um link de torrent confiável e um patch de ativação simples. Assim, você poderá aproveitar todas as novidades e recursos da versão mais recente do Sketchup, sem gastar nada.

              -

              O que há de novo no Sketchup Pro 2022?

              -

              Antes de baixar o Sketchup crackeado, vale a pena conhecer as principais novidades e atualizações da versão Pro 2022 do software 3D. Confira algumas delas:

              -
                -
              • Novos ícones de logotipo para SketchUp Pro 2022 e seu pacote de aplicativos: O logotipo vermelho do SketchUp foi substituído por novas marcas que alinham o SketchUp ao portfólio mais amplo de produtos Trimble e que podem ser modeladas no próprio SketchUp.
              • -
              • Buscar no SketchUp: um novo recurso que permite encontrar e ativar rapidamente os comandos nativos e as extensões instaladas. Basta digitar o nome da ferramenta ou uma palavra relacionada ao que ela faz para encontrá-la facilmente.
              • -
              • Seleção com laço: uma nova ferramenta que permite desenhar limites de seleção personalizados sem reorientar a câmera. Você pode criar também várias seleções específicas com uma operação de clicar e arrastar e selecionar entidades com muito mais rapidez.
              • -
              • Ferramenta Etiqueta: uma nova ferramenta que permite clicar em entidades ou entidades pré-selecionadas para aplicar etiquetas facilmente. Com a ferramenta Etiqueta, você pode agilizar a organização do modelo etiquetando objetos diretamente na janela do projeto, apagar rapidamente as etiquetas indesejadas e aumentar a precisão dos relatórios ao modificar em massa a etiqueta de componentes.
              • -
              -

              Como baixar o Sketchup crackeado e ativado para 2022?

              -

              Para baixar o Sketchup crackeado e ativado para 2022, você vai precisar de um programa de torrent, como o uTorrent ou o BitTorrent, e de um link de download confiável. Nós recomendamos o link do site Trono do Torrent[^1^], que oferece o SketchUp Pro 2022 + Ativação permanente + SketchUP sem precisar instalar já ativado em um arquivo ZIP de 445 MB.

              -

              -

              Depois de baixar o arquivo ZIP, você vai precisar extrair o conteúdo em uma pasta de sua preferência. Dentro da pasta, você vai encontrar dois arquivos: um executável chamado "SketchUpPro-2022-pt-BR.exe" e um patch chamado "SketchUpPro-2022-patch.exe".

              -

              Para instalar o Sketchup crackeado, basta executar o arquivo "SketchUpPro-2022-pt-BR.exe" e seguir as instruções na tela. Depois de instalado, não abra o programa ainda. Execute o arquivo "SketchUpPro-2022-patch.exe" como administrador e clique em "Patch". Aguarde alguns segundos até que a mensagem "Patching Done" apareça.

              -

              Pronto! Agora você já pode abrir o Sketchup crackeado e ativado para 2022 e aproveitar todos os recursos do software 3D sem restrições. Lembre

              7196e7f11a
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Crack Installshield 2012 Premier Edition 4.md b/spaces/stomexserde/gpt4-ui/Examples/Crack Installshield 2012 Premier Edition 4.md deleted file mode 100644 index b69ac3949f414434533abb0cb6e5066b25d06ff8..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Crack Installshield 2012 Premier Edition 4.md +++ /dev/null @@ -1,14 +0,0 @@ - -

              How to Crack Installshield 2012 Premier Edition 4

              -

              Installshield 2012 Premier Edition 4 is a software tool that allows you to create installers and software packages for Windows and other platforms. It is a powerful and flexible tool that can help you create professional and reliable installations for your applications. However, it is also a proprietary and expensive tool that requires a license to use. If you want to crack Installshield 2012 Premier Edition 4 and use it without paying for a license, you will need to follow some steps and use some tools that are available online.

              -

              Before you start, you will need to download Installshield 2012 Premier Edition 4 from the official website or from another source. You will also need to download a crack file and a keygen file that are compatible with this version of Installshield. You can find these files on various websites and forums that offer software cracks, such as [^1^] or [^2^]. However, be careful when downloading these files, as they may contain viruses or malware that can harm your computer. You should scan them with an antivirus program before using them.

              -

              Crack Installshield 2012 Premier Edition 4


              Download Filehttps://urlgoal.com/2uI8Kb



              -

              After you have downloaded the files, you will need to install Installshield 2012 Premier Edition 4 on your computer. Follow the instructions on the installer and choose the default options. Do not run Installshield after the installation is complete. Instead, go to the folder where you installed Installshield, which is usually C:\\Program Files\\InstallShield\\2012\\System\\ or C:\\Program Files (x86)\\InstallShield\\2012\\System\\ on a 64-bit system. There, you will find a file called ISUIServices.dll. This is the file that checks for the license activation of Installshield. You will need to replace this file with the crack file that you downloaded earlier. To do this, copy the crack file and paste it in the same folder, overwriting the original file. Make sure to back up the original file in case something goes wrong.

              -

              Now, you can run Installshield as an administrator. When you do, you will see a window that asks you to activate your license. Choose the option to activate now and enter any random serial number in the field. The online activation will fail, but do not worry. Choose the option to email activate instead. This will open another window that shows you a request code. This is a code that identifies your computer and your installation of Installshield. You will need to copy this code and paste it in the keygen file that you downloaded earlier.

              -

              The keygen file is a program that generates a response code based on your request code. This response code is a code that validates your license and allows you to use Installshield without restrictions. To use the keygen file, you may need to disable your antivirus program temporarily, as it may block or delete the file as a false positive. Run the keygen file as an administrator and paste your request code in the field. Then, click on generate or calculate to get your response code.

              -

              Copy your response code and paste it back in the activation window of Installshield. Click on activate or finish to complete the process. Your license should be successfully activated and Installshield should start normally. You can now use Installshield 2012 Premier Edition 4 without paying for a license.

              -

              However, before you do anything else, you should restore the original ISUIServices.dll file in the folder where you installed Installshield. This is because the crack file may cause some problems or errors when using Installshield. To do this, simply copy the original file that you backed up earlier and paste it in the same folder, overwriting the crack file.

              -

              -

              Congratulations! You have cracked Installshield 2012 Premier Edition 4 and can use it for free.

              7196e7f11a
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Csi Safe V14.0.0.1029 2014.rar [BETTER].md b/spaces/stomexserde/gpt4-ui/Examples/Csi Safe V14.0.0.1029 2014.rar [BETTER].md deleted file mode 100644 index 150af22e91eddb938533b3dbcf59af6bebe04d37..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Csi Safe V14.0.0.1029 2014.rar [BETTER].md +++ /dev/null @@ -1,151 +0,0 @@ -
              -

              Csi Safe v14

              Csi Safe v14.0.0.1029 2014.rar: A Comprehensive Software for Structural Analysis and Design of Reinforced Concrete Slabs and Foundations

              -

              If you are an engineer or a designer who works with reinforced concrete slabs and foundations, you might be interested in Csi Safe v14.0.0.1029 2014.rar. This is a software product that can help you with structural analysis and design of slabs and foundations of any shape and size, using advanced finite element and cracked-section methods. In this article, we will give you a brief overview of what Csi Safe v14.0.0.1029 2014.rar is, what it does, and why it is useful for your projects. We will also show you how to download and install the software product from a reliable source, how to use it for different types of problems, and how to compare it with other software products in the market. We will also discuss the advantages and limitations of Csi Safe v14.0.0.1029 2014.rar, and answer some frequently asked questions about it.

              -

              Csi Safe v14.0.0.1029 2014.rar


              Download File ✦✦✦ https://urlgoal.com/2uI6Tv



              -

              What is Csi Safe v14.0.0.1029 2014.rar?

              -

              Csi Safe v14.0.0.1029 2014.rar is a software product developed by Computers and Structures, Inc. (CSI), a company that specializes in software tools for structural and earthquake engineering. Csi Safe stands for Slab Analysis by the Finite Element Method, and it is a software product that can perform structural analysis and design of reinforced concrete slabs and foundations. The software product can handle slabs and foundations of any shape and size, with different types of loading and boundary conditions, using advanced finite element and cracked-section methods. The software product can also perform reinforced concrete design of slabs and foundations using different types of design codes, design strips, reinforcement options, design criteria, and design outputs. The software product can integrate with other CSI products, such as SAP2000, ETABS, CSiBridge, PERFORM-3D, etc., for seamless data exchange and analysis. The software product can also export and import data in various formats, such as DXF, DWG, CIS/2, IFC, etc., for compatibility with other software products. The software product has a user-friendly graphical user interface (GUI) that allows users to create and modify models of slabs and foundations easily and intuitively. The software product also has a comprehensive online help system that provides users with detailed information and guidance on how to use the software product effectively.

              -

              How to download and install Csi Safe v14.0.0.1029 2014.rar?

              -

              If you want to download and install Csi Safe v14.0.0.1029 2014.rar on your computer, you need to follow these steps:

              -
                -
              1. Go to the official website of CSI at https://www.csiamerica.com/ and click on the Products tab.
              2. -
              3. Select Csi Safe from the list of products and click on the Download button.
              4. -
              5. You will be redirected to a page where you need to fill in some information about yourself and your company, such as your name, email address, phone number, country, etc.
              6. -
              7. After filling in the information, click on the Submit button.
              8. -
              9. You will receive an email from CSI with a link to download the software product.
              10. -
              11. Click on the link in the email and save the file Csi_Safe_v14_0_0_1029_2014_rar.zip on your computer.
              12. -
              13. Extract the file Csi_Safe_v14_0_0_1029_2014_rar.zip using a program such as WinRAR or 7-Zip.
              14. -
              15. You will get a folder named Csi_Safe_v14_0_0_1029_2014_rar that contains the setup file Csi_Safe_v14_0_0_1029_2014_rar.exe.
              16. -
              17. Double-click on the setup file Csi_Safe_v14_0_0_1029_2014_rar.exe to start the installation process.
              18. -
              19. Follow the instructions on the screen to complete the installation process.
              20. -
              21. You will need to enter a license key to activate the software product. You can get a license key from CSI by contacting them at https://www.csiamerica.com/contact.
              22. -
              23. After entering the license key, you can start using Csi Safe v14.0.0.1029 2014 .rar on your computer.
              24. -
              -

              Here are some screenshots of the download and installation process:

              - Screenshot of the CSI website - Screenshot of the download page - Screenshot of the email from CSI - Screenshot of the extracted folder - Screenshot of the setup file - Screenshot of the installation process -

              How to use Csi Safe v14.0.0.1029 2014.rar?

              -

              After you have downloaded and installed Csi Safe v14.0.0.1029 2014.rar on your computer, you can start using it for different types of structural analysis and design problems involving reinforced concrete slabs and foundations. In this section, we will show you how to use the software product for some common tasks, such as modeling slabs and foundations, performing finite element analysis, performing cracked-section analysis, and performing reinforced concrete design. We will also provide you with some examples and tips to help you use the software product effectively.

              -

              How to model slabs and foundations in Csi Safe v14.0.0.1029 2014.rar?

              -

              One of the first steps in using Csi Safe v14.0.0.1029 2014.rar is to create and modify models of slabs and foundations that represent your structural system. You can model slabs and foundations of any shape and size, with different types of loading and boundary conditions, using the user-friendly graphical user interface (GUI) of the software product. Here are some steps to follow:

              -
                -
              1. Open Csi Safe v14.0.0.1029 2014.rar on your computer and click on the New Model button to create a new model.
              2. -
              3. You will see a blank model space where you can draw your slabs and foundations using different tools, such as lines, arcs, circles, polygons, etc.
              4. -
              5. You can also import your slabs and foundations from other software products, such as SAP2000, ETABS, CSiBridge, etc., by clicking on the Import button and selecting the appropriate file format.
              6. -
              7. You can modify your slabs and foundations by using different tools, such as move, copy, rotate, mirror, offset, trim, extend, etc.
              8. -
              9. You can assign different properties to your slabs and foundations, such as thickness, material, support conditions, etc., by clicking on the Assign button and selecting the appropriate option.
              10. -
              11. You can apply different types of loading to your slabs and foundations, such as dead load, live load, wind load, earthquake load, temperature load, etc., by clicking on the Load button and selecting the appropriate option.
              12. -
              13. You can view your slabs and foundations in different modes, such as plan view, elevation view, 3D view, etc., by clicking on the View button and selecting the appropriate option.
              14. -
              15. You can save your model by clicking on the Save button and giving a name to your file.
              16. -
              -

              Here are some screenshots of the modeling process:

              - Screenshot of the new model space - Screenshot of drawing a slab using lines - Screenshot of importing a slab from SAP2000 - Screenshot of modifying a slab using move tool - Screenshot of assigning thickness to a slab - Screenshot of applying dead load to a slab - Screenshot of viewing a slab in 3D mode -

              How to perform finite element analysis in Csi Safe v14.0.0.1029 2014.rar?

              -

              After you have modeled your slabs and foundations, you can perform finite element analysis to obtain the structural response of your system under different loading conditions. Finite element analysis is a numerical method that divides your system into small elements and nodes, and solves the equilibrium equations for each element and node, using different types of elements, meshing options, material properties, and analysis methods. Here are some steps to follow:

              -
                -
              1. Select the slabs and foundations that you want to analyze by clicking on the Select button and choosing the appropriate option.
              2. -
              3. Click on the Mesh button and choose the type of elements that you want to use for your analysis, such as plate elements, shell elements, solid elements, etc.
              4. -
              5. Click on the Mesh Options button and choose the meshing options that you want to use for your analysis, such as mesh size, mesh refinement, mesh smoothing, etc.
              6. -
              7. Click on the Material Properties button and assign the material properties that you want to use for your analysis, such as modulus of elasticity, Poisson's ratio, density, etc.
              8. -
              9. Click on the Analysis Methods button and choose the type of analysis method that you want to use for your analysis, such as linear static analysis, nonlinear static analysis, modal analysis, response spectrum analysis, etc.
              10. -
              11. Click on the Run Analysis button and wait for the software product to perform the finite element analysis of your system.
              12. -
              13. Click on the Display Results button and view the results of your analysis, such as displacements, stresses, strains, reactions, etc., in different modes, such as contour plots, vector plots, tables, graphs, etc.
              14. -
              -

              Here are some screenshots of the finite element analysis process:

              -

              - Screenshot of selecting a slab for analysis - Screenshot of choosing plate elements for analysis - Screenshot of choosing mesh options for analysis - Screenshot of assigning material properties for analysis - Screenshot of choosing linear static analysis method - Screenshot of running the analysis - Screenshot of displaying the results -

              How to perform cracked-section analysis in Csi Safe v14.0.0.1029 2014.rar?

              -

              One of the advanced features of Csi Safe v14.0.0.1029 2014.rar is that it can perform cracked-section analysis of slabs and foundations under different loading conditions. Cracked-section analysis is a method that considers the effects of cracking on the stiffness and strength of reinforced concrete sections, using different types of crack models, crack parameters, crack patterns, and crack effects. Here are some steps to follow:

              -
                -
              1. Select the slabs and foundations that you want to analyze by clicking on the Select button and choosing the appropriate option.
              2. -
              3. Click on the Cracked-Section Analysis button and choose the type of crack model that you want to use for your analysis, such as smeared crack model, discrete crack model, etc.
              4. -
              5. Click on the Crack Parameters button and assign the crack parameters that you want to use for your analysis, such as crack width, crack spacing, crack orientation, etc.
              6. -
              7. Click on the Crack Patterns button and choose the type of crack pattern that you want to use for your analysis, such as uniform crack pattern, random crack pattern, user-defined crack pattern, etc.
              8. -
              9. Click on the Crack Effects button and choose the type of crack effect that you want to use for your analysis, such as stiffness reduction, stress redistribution, tension stiffening, etc.
              10. -
              11. Click on the Run Analysis button and wait for the software product to perform the cracked-section analysis of your system.
              12. -
              13. Click on the Display Results button and view the results of your analysis, such as cracked-section properties, cracked-section stresses, cracked-section strains, etc., in different modes, such as contour plots, vector plots, tables, graphs, etc.
              14. -
              -

              Here are some screenshots of the cracked-section analysis process:

              - Screenshot of selecting a slab for analysis - Screenshot of choosing smeared crack model for analysis - Screenshot of assigning crack parameters for analysis - Screenshot of choosing uniform crack pattern for analysis - Screenshot of choosing stiffness reduction for analysis - Screenshot of running the analysis - Screenshot of displaying the results -

              How to perform reinforced concrete design in Csi Safe v14.0.0.1029 2014.rar?

              -

              Another advanced feature of Csi Safe v14.0.0.1029 2014.rar is that it can perform reinforced concrete design of slabs and foundations using different types of design codes, design strips, reinforcement options, design criteria, and design outputs. Reinforced concrete design is a method that determines the required amount and arrangement of reinforcement for slabs and foundations to resist the applied loads and satisfy the serviceability and durability requirements. Here are some steps to follow:

              -
                -
              1. Select the slabs and foundations that you want to design by clicking on the Select button and choosing the appropriate option.
              2. -
              3. Click on the Design Code button and choose the type of design code that you want to use for your design, such as ACI 318, BS 8110, Eurocode 2, etc.
              4. -
              5. Click on the Design Strips button and define the design strips that you want to use for your design, such as longitudinal strips, transverse strips, etc.
              6. -
              7. Click on the Reinforcement Options button and choose the type of reinforcement options that you want to use for your design, such as top reinforcement, bottom reinforcement, shear reinforcement, etc.
              8. -
              9. Click on the Design Criteria button and assign the design criteria that you want to use for your design, such as strength reduction factors, load combinations, deflection limits, crack control limits, etc.
              10. -
              11. Click on the Run Design button and wait for the software product to perform the reinforced concrete design of your system.
              12. -
              13. Click on the Display Results button and view the results of your design, such as required reinforcement area, provided reinforcement area, reinforcement ratio, utilization ratio, etc., in different modes, such as contour plots, vector plots, tables, graphs, etc.
              14. -
              -

              Here are some screenshots of the reinforced concrete design process:

              - Screenshot of selecting a slab for design - Screenshot of choosing ACI 318 for design code - Screenshot of defining longitudinal strips for design - Screenshot of choosing top and bottom reinforcement for design - Screenshot of assigning strength reduction factors for design - Screenshot of running the design - Screenshot of displaying the results -

              What are the advantages of Csi Safe v14.0.0.1029 2014.rar over other software products?

              -

              Csi Safe v14.0.0.1029 2014.rar is not the only software product that can perform structural analysis and design of reinforced concrete slabs and foundations. There are other software products in the market that can do similar tasks, such as RISA-2D, STAAD.Pro, ADAPT-Floor Pro, etc. However, Csi Safe v14.0.0.1029 2014.rar has some advantages over these software products that make it a better choice for engineers and designers. Here are some of the advantages of Csi Safe v14.0.0.1029 2014.rar:

              -
                -
              • Csi Safe v14.0.0.1029 2014.rar can handle slabs and foundations of any shape and size, with different types of loading and boundary conditions, using advanced finite element and cracked-section methods. Other software products may have limitations on the geometry, loading, and analysis methods of slabs and foundations.
              • -
              • Csi Safe v14.0.0.1029 2014.rar can perform reinforced concrete design of slabs and foundations using different types of design codes, design strips, reinforcement options, design criteria, and design outputs. Other software products may have limitations on the design codes, design strips, reinforcement options, design criteria, and design outputs of slabs and foundations.
              • -
              • Csi Safe v14.0.0.1029 2014.rar can integrate with other CSI products, such as SAP2000, ETABS, CSiBridge, PERFORM-3D, etc., for seamless data exchange and analysis. Other software products may not have such integration capabilities with other software products.
              • -
              • Csi Safe v14.0.0.1029 2014.rar can export and import data in various formats, such as DXF, DWG, CIS/2, IFC, etc., for compatibility with other software products. Other software products may not have such export and import capabilities with other software products.
              • -
              • Csi Safe v14.0.0.1029 2014.rar has a user-friendly graphical user interface (GUI) that allows users to create and modify models of slabs and foundations easily and intuitively. Other software products may have a complex or outdated user interface that makes it difficult for users to create and modify models of slabs and foundations.
              • -
              • Csi Safe v14 .0.0.1029 2014.rar has a comprehensive online help system that provides users with detailed information and guidance on how to use the software product effectively. Other software products may have a poor or incomplete online help system that leaves users confused and frustrated.
              • -
              -

              These are some of the advantages of Csi Safe v14.0.0.1029 2014.rar over other software products that make it a better choice for engineers and designers who work with reinforced concrete slabs and foundations.

              -

              What are the limitations and challenges of Csi Safe v14.0.0.1029 2014.rar?

              -

              Although Csi Safe v14.0.0.1029 2014.rar is a powerful and versatile software product that can perform structural analysis and design of reinforced concrete slabs and foundations, it is not perfect and flawless. There are some limitations and challenges of Csi Safe v14.0.0.1029 2014.rar that users should be aware of and overcome. Here are some of the limitations and challenges of Csi Safe v14.0.0.1029 2014.rar:

              -
                -
              • Csi Safe v14.0.0.1029 2014.rar requires a license key to activate the software product, which can be obtained from CSI by contacting them at https://www.csiamerica.com/contact. Users who do not have a license key cannot use the software product, or can only use it in a limited mode.
              • -
              • Csi Safe v14.0.0.1029 2014.rar has some system requirements that users need to meet in order to run the software product smoothly and efficiently, such as operating system, processor, memory, disk space, graphics card, etc. Users who do not meet these system requirements may experience slow performance, errors, or crashes of the software product.
              • -
              • Csi Safe v14.0.0.1029 2014.rar may have some compatibility issues with other software products, such as different versions, formats, standards, etc. Users who want to exchange data with other software products may encounter difficulties or errors in importing or exporting data.
              • -
              • Csi Safe v14.0.0.1029 2014.rar may have some accuracy issues with the finite element and cracked-section methods that it uses for structural analysis and design of slabs and foundations, such as convergence problems, mesh sensitivity problems, numerical instability problems, etc. Users who want to obtain accurate and reliable results may need to adjust the analysis parameters, such as element type, mesh size, crack model, crack parameter, etc., or verify the results with other methods or software products.
              • -
              • Csi Safe v14 .0.0.1029 2014.rar may have some user interface issues with the graphical user interface (GUI) that it uses for creating and modifying models of slabs and foundations, such as unclear icons, confusing menus, inconsistent commands, etc. Users who want to use the software product easily and intuitively may need to consult the online help system, watch the tutorial videos, or contact the technical support for assistance.
              • -
              -

              These are some of the limitations and challenges of Csi Safe v14.0.0.1029 2014.rar that users should be aware of and overcome.

              -

              Conclusion

              -

              In conclusion, Csi Safe v14.0.0.1029 2014.rar is a comprehensive software product that can perform structural analysis and design of reinforced concrete slabs and foundations. The software product can handle slabs and foundations of any shape and size, with different types of loading and boundary conditions, using advanced finite element and cracked-section methods. The software product can also perform reinforced concrete design of slabs and foundations using different types of design codes, design strips, reinforcement options, design criteria, and design outputs. The software product can integrate with other CSI products, such as SAP2000, ETABS, CSiBridge, PERFORM-3D, etc., for seamless data exchange and analysis. The software product can also export and import data in various formats, such as DXF, DWG, CIS/2, IFC, etc., for compatibility with other software products. The software product has a user-friendly graphical user interface (GUI) that allows users to create and modify models of slabs and foundations easily and intuitively. The software product also has a comprehensive online help system that provides users with detailed information and guidance on how to use the software product effectively.

              -

              Csi Safe v14.0.0.1029 2014.rar is a better choice for engineers and designers who work with reinforced concrete slabs and foundations than other software products in the market, because it has some advantages over them, such as handling complex geometry, loading, and analysis methods; performing various design codes, design strips, reinforcement options, design criteria, and design outputs; integrating with other CSI products; exporting and importing data in various formats; having a user-friendly graphical user interface; and having a comprehensive online help system.

              -

              However, Csi Safe v14.0.0.1029 2014.rar is not perfect and flawless, and it has some limitations and challenges that users should be aware of and overcome, such as requiring a license key; having some system requirements; having some compatibility issues; having some accuracy issues; and having some user interface issues.

              -

              We hope that this article has given you a clear and comprehensive overview of what Csi Safe v14.0.0.1029 2014.rar is, what it does, and why it is useful for your projects. We also hope that this article has shown you how to download and install the software product from a reliable source, how to use it for different types of problems, and how to compare it with other software products in the market. We also hope that this article has discussed the advantages and limitations of Csi Safe v14.0.0.1029 2014.rar, and answered some frequently asked questions about it.

              -

              FAQs

              -

              Here are some frequently asked questions about Csi Safe v14.0.0.1029 2014.rar, with concise answers:

              -
                -
              1. Q: How much does Csi Safe v14.0.0.1029 2014.rar cost?
              2. -
              3. A: Csi Safe v14.0.0.1029 2014.rar is not a free software product, and it requires a license key to activate it. The cost of the license key depends on the type of license, such as standalone, network, academic, etc., and the duration of the license, such as monthly, yearly, perpetual, etc. You can contact CSI at https://www.csiamerica.com/contact to get a quote for the license key.
              4. -
              5. Q: How can I get technical support for Csi Safe v14.0.0.1029 2014.rar?
              6. -
              7. A: You can get technical support for Csi Safe v14.0.0.1029 2014.rar by contacting CSI at https://www.csiamerica.com/support. You can also access the online help system of the software product by clicking on the Help button and choosing the appropriate option.
              8. -
              9. Q: How can I learn more about Csi Safe v14.0.0.1029 2014.rar?
              10. -
              11. A: You can learn more about Csi Safe v14.0.0.1029 2014.rar by visiting the official website of CSI at https://www.csiamerica.com/products/safe. You can also watch the tutorial videos of the software product by clicking on the Help button and choosing the Video Tutorials option.
              12. -
              13. Q: How can I update Csi Safe v14.0.0.1029 2014.rar to the latest version?
              14. -
              15. A: You can update Csi Safe v14.0.0.1029 2014.rar to the latest version by clicking on the Help button and choosing the Check for Updates option. You can also download the latest version of the software product from the official website of CSI at https://www.csiamerica.com/products/safe/downloads.
              16. -
              17. Q: How can I uninstall Csi Safe v14 .0.0.1029 2014.rar from my computer?
              18. -
              19. A: You can uninstall Csi Safe v14.0.0.1029 2014.rar from your computer by clicking on the Start button and choosing the Control Panel option. Then, click on the Programs and Features option and find Csi Safe v14.0.0.1029 2014.rar from the list of installed programs. Then, click on the Uninstall button and follow the instructions on the screen to complete the uninstallation process.
              20. -

              b2dd77e56b
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Examen De Fin De Formation Tsge Avec Solution Pdf.md b/spaces/stomexserde/gpt4-ui/Examples/Examen De Fin De Formation Tsge Avec Solution Pdf.md deleted file mode 100644 index fca6c3d0d80ece6917b31fc74d07b06fc6ea2b9b..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Examen De Fin De Formation Tsge Avec Solution Pdf.md +++ /dev/null @@ -1,30 +0,0 @@ -

              Examen De Fin De Formation Tsge Avec Solution Pdf

              -

If you are a student in the Technicien Spécialisé en Gestion des Entreprises (TSGE) program at OFPPT, you most likely need to revise your end-of-training exams (EFF) with corrected solutions. These exams matter because they validate your diploma and test your skills in business management.

              -

Fortunately, there are websites that offer TSGE end-of-training exams with solutions in PDF, free to download. These exams cover the different training years, from 2011 to 2020, and the different variants (V1 and V2), so you can practice with real papers and detailed corrections.

              -

              Examen De Fin De Formation Tsge Avec Solution Pdf


              Download 🗸 https://urlgoal.com/2uI8Vj



              -

Here are a few examples of websites that offer TSGE end-of-training exams with PDF solutions:

              -
                -
• Taalime.ma: This site offers TSGE end-of-training exams in business management with OFPPT corrections. You can download the exams and corrections in PDF for the years 2011 to 2020[^1^].
              • -
• FSJES OFPPT COURS: This site offers a proposed correction for the national TSGE end-of-training exam of the June 2020 session. You can read the questions and corrections online or download them in PDF[^2^].
              • -
• Studocu: This site gives you access to academic documents shared by other students. You can find the TSGE 2020 V1 EFF end-of-training exam there and download it in PDF[^3^].
              • -
• Ecopro-edu: This site offers corrected TSGE end-of-training exams for the years 2011 to 2018. You can download the exams and corrections in PDF or read them online[^4^].
              • -
              -

In short, several online sources let you revise your TSGE end-of-training exams with PDF solutions. We recommend taking advantage of these resources to prepare effectively for your papers and earn your diploma.


How to prepare for the TSGE end-of-training exams?

              -

To pass your TSGE end-of-training exams, downloading exams with PDF solutions is not enough; you also need to study them carefully and understand them. Here are a few tips to help you prepare effectively:

              -

              -
                -
• Review your courses and modules: The TSGE end-of-training exams cover the different aspects of business management, such as accounting, marketing, finance, and law. It is therefore essential to master the theoretical and practical notions taught during your training. Reread your courses and modules, make revision sheets, and go over the important formulas and definitions.
              • -
• Do exercises and case studies: The TSGE end-of-training exams are not limited to course questions; they also include exercises and case studies that ask you to apply your knowledge to concrete situations. To practice, work through exercises and case studies taken from your courses, your tutorials, or the exams with PDF solutions. Correct your mistakes and check your results.
              • -
• Practice with mock exams: To put yourself in real exam conditions, it is recommended to take mock exams. Choose an exam with a PDF solution that you have not done yet, print it or display it on your screen, and answer the questions within the allotted time. Follow the instructions, do not cheat, and do not look at the correction before you finish. Then mark your paper and grade yourself according to the given scale.
              • -
• Manage your time and your stress: The TSGE end-of-training exams generally last between 2 and 3 hours, so you need to manage your time and your stress to keep your composure. Before the exam, note the number of questions and the points assigned to each one, and allocate your time according to each question's weight. Start with the easiest or quickest questions and do not stay stuck on any single one; if you cannot answer a question, move on to the next and come back later if time allows. During the exam, breathe calmly, drink water, reread your answers, and check your calculations.
              • -
              -

By following these tips, you will boost your chances of passing your TSGE end-of-training exams with flying colors. Good luck!

              -
              -
              \ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/benchmarks.py b/spaces/stratussox/yolov5_inference/benchmarks.py deleted file mode 100644 index 03d7d693a93674bb9a94db07f24a8e9ef7013f8f..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/benchmarks.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 benchmarks on all supported export formats - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT - -Usage: - $ python benchmarks.py --weights yolov5s.pt --img 640 -""" - -import argparse -import platform -import sys -import time -from pathlib import Path - -import pandas as pd - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -# ROOT = ROOT.relative_to(Path.cwd()) # relative - -import export -from models.experimental import attempt_load -from models.yolo import SegmentationModel -from segment.val import run as val_seg -from utils import notebook_init -from utils.general import LOGGER, check_yaml, file_size, print_args -from utils.torch_utils import select_device -from val import run as val_det - - -def run( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure -): - y, t = [], time.time() - device = select_device(device) - model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
- for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) - try: - assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported - assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML - if 'cpu' in device.type: - assert cpu, 'inference not supported on CPU' - if 'cuda' in device.type: - assert gpu, 'inference not supported on GPU' - - # Export - if f == '-': - w = weights # PyTorch format - else: - w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others - assert suffix in str(w), 'export failed' - - # Validate - if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) - metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) - else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) - metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) - speed = result[2][1] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference - except Exception as e: - if hard_fail: - assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') - y.append([name, None, None, None]) # mAP, t_inference - if pt_only and i == 0: - break # break after PyTorch - - # Print results - LOGGER.info('\n') - parse_opt() - notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] - py = pd.DataFrame(y, columns=c) - LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') - LOGGER.info(str(py if map else py.iloc[:, :2])) - if hard_fail and isinstance(hard_fail, str): - metrics = py['mAP50-95'].array # values to compare to floor - floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n - assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' - return py - - -def test( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure -): - y, t = [], time.time() - device = select_device(device) - for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) - try: - w = weights if f == '-' else \ - export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights - assert suffix in str(w), 'export failed' - y.append([name, True]) - except Exception: - y.append([name, False]) # mAP, t_inference - - # Print results - LOGGER.info('\n') - parse_opt() - notebook_init() # print system info - py = pd.DataFrame(y, columns=['Format', 'Export']) - LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') - LOGGER.info(str(py)) - return py - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--test', action='store_true', help='test exports only') - parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') - parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') - opt = parser.parse_args() - opt.data = check_yaml(opt.data) # check YAML - print_args(vars(opt)) - return opt - - -def main(opt): - test(**vars(opt)) if opt.test else run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Call Of Duty 4 Modern Warfare Keygen !!TOP!!.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Call Of Duty 4 Modern Warfare Keygen !!TOP!!.md deleted file mode 100644 index c735e81e63d620b49c49547fa8eb38db054bf01c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Call Of Duty 4 Modern Warfare Keygen !!TOP!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

              call of duty 4 modern warfare keygen


              Download File ->>> https://cinurl.com/2uEXqp



              -
              -Call Of Duty 4 Modern Warfare CD Key Generator (Xbox – PS3 – Microsoft Windows – Mac) We have this free game for you in form of Call... 1fdad05405
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Scx 3200 Firmware.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Scx 3200 Firmware.md deleted file mode 100644 index 29bbc47a5e91201167782b6089e03c972dfafdeb..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Scx 3200 Firmware.md +++ /dev/null @@ -1,13 +0,0 @@ -

              Scx 3200 Firmware


              Download ★★★★★ https://cinurl.com/2uEXJC



- -How to see the firmware version (Firmware version) of the Samsung SCX-3200, 3205, 3207, 3200W, 3205W. Printblog.com.ua. Firmware version: Samsung SCX-3205W Printers, How To. -How to check the firmware version. -Download firmware and drivers for your Samsung SCX-3200 printer from our website. -Download the latest driver, firmware and software for your Samsung SCX-3200 Series MFP. -Drivers and software for your MFP product. -In this video I will tell you how you can determine the firmware version of your printer. -Instructions on how to flash your Samsung SCX-3200 printer. -In order to flash your Samsung SCX-3200 or SCX-3205W, you need to. 8a78ff9644
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Soal Dan Jawaban Seni Budaya Sma Kelas Xi Semester 1.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Soal Dan Jawaban Seni Budaya Sma Kelas Xi Semester 1.md deleted file mode 100644 index 0558b10180955a326f034e87e8d4aa5159eca7b8..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Soal Dan Jawaban Seni Budaya Sma Kelas Xi Semester 1.md +++ /dev/null @@ -1,6 +0,0 @@ -

              soal dan jawaban seni budaya sma kelas xi semester 1


              Downloadhttps://cinurl.com/2uEXN5



              -
-142 Answer keys to the challenge problems .. Mathematics learning ... Mathematics textbook for SMA (senior high school) Class 1, published by Grafindo.. Soft Cover. Class. ... LKS SENI BUDAYA (arts and culture worksheet) for SMA/MA Class X (10), Semester 2 | Viva Pakarindo ... PAI (Islamic religious education) for SMA/MA Class XI (11), Semester 2 | Viva Pakarindo. Rp7.199. 83 sold. 1fdad05405
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Engineering Of Chemical Reactions L D Schmidt Solution __TOP__.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Engineering Of Chemical Reactions L D Schmidt Solution __TOP__.md deleted file mode 100644 index 71cf78f23c2407027203aeb4cf0e997ed5b52fed..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/The Engineering Of Chemical Reactions L D Schmidt Solution __TOP__.md +++ /dev/null @@ -1,129 +0,0 @@ - -

              The Engineering Of Chemical Reactions L D Schmidt Solution: A Valuable Resource for Chemical Engineers

              - -

              Chemical engineering is a branch of engineering that deals with the design, operation, and optimization of chemical processes and reactors. Chemical engineers need to have a solid understanding of the principles and applications of chemical reactions, such as kinetics, thermodynamics, catalysis, mass transfer, heat transfer, and fluid dynamics.
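To make that concrete, here is a single worked relation of the kind every chemical reaction engineering course builds on (a standard textbook result, quoted here only for illustration rather than taken from Schmidt's book): for a first-order reaction A → products carried out in a constant-volume batch reactor, the mole balance and its integrated form are

$$-\frac{dC_A}{dt} = k\,C_A \qquad\Longrightarrow\qquad C_A(t) = C_{A0}\,e^{-kt},$$

so the reactant concentration decays exponentially with the rate constant k, and sizing the reactor amounts to choosing the reaction time needed to reach a target conversion.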

              - -

              One of the most popular and comprehensive textbooks on chemical reaction engineering is The Engineering of Chemical Reactions by Lanny D. Schmidt. This book covers both the fundamentals and the advanced topics of chemical reaction engineering, such as reactor design, multiphase reactors, catalytic reactors, environmental reactors, biological reactors, and microreactors. The book also includes numerous examples, problems, and case studies to illustrate the concepts and applications of chemical reaction engineering.

              -

              The Engineering Of Chemical Reactions L D Schmidt Solution


              Download Zip ——— https://cinurl.com/2uEXtz



              - -

              However, learning chemical reaction engineering can be challenging and requires a lot of practice and problem-solving skills. That's why many students and instructors rely on the Instructor's Solutions Manual for the Engineering of Chemical Reactions by Lanny D. Schmidt. This manual provides detailed solutions to all the problems in the textbook, as well as additional problems and exercises for further practice and learning.

              - -

              What is The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that contains the solutions to all the problems in the textbook The Engineering of Chemical Reactions by Lanny D. Schmidt. The book is intended for instructors who use the textbook in their courses, but it can also be useful for students who want to check their answers or learn from the solutions.

              - -

              The book is divided into 14 chapters that correspond to the chapters in the textbook. Each chapter contains the solutions to all the problems in that chapter, as well as some additional problems that are not in the textbook. The solutions are explained step by step, with clear calculations, diagrams, graphs, and references. The book also includes appendices that contain useful data and formulas for chemical reaction engineering.

              - -

              The book is written by Lanny D. Schmidt himself, who is a professor emeritus of chemical engineering and materials science at the University of Minnesota. He has over 40 years of experience in teaching and research in chemical reaction engineering, and has authored or co-authored over 300 publications and 10 books on the subject.

              - -

              Why do you need The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a valuable resource for anyone who wants to learn or teach chemical reaction engineering. With this book, you can:

              - -
                -
              • Enhance your understanding of the concepts and applications of chemical reaction engineering.
              • -
              • Improve your problem-solving skills and confidence in chemical reaction engineering.
              • -
              • Prepare for exams and assignments in chemical reaction engineering courses.
              • -
              • Explore more topics and challenges in chemical reaction engineering.
              • -
              • Stay updated with the latest developments and trends in chemical reaction engineering.
              • -
              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is not only a solution manual, but also a learning tool that can help you master chemical reaction engineering.

              - -

              How to get The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              To get The Engineering Of Chemical Reactions L D Schmidt Solution, you need to follow these steps:

              -

              - -
                -
              1. Download the torrent file from a reliable source.
              2. -
              3. Open the torrent file with a torrent client program (such as uTorrent or BitTorrent).
              4. -
              5. Select the destination folder where you want to save the book files.
              6. -
              7. Wait for the download to complete.
              8. -
              9. Open the book files with a PDF reader program (such as Adobe Acrobat Reader).
              10. -
              - -

              Congratulations! You have successfully downloaded The Engineering Of Chemical Reactions L D Schmidt Solution on your computer. Now you can start using it to learn or teach chemical reaction engineering.

              - -

              -

              What are the advantages of The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that offers many advantages for instructors and students of chemical reaction engineering. Some of these advantages are:

              - -
                -
              • It provides complete and accurate solutions to all the problems in the textbook, as well as additional problems for extra practice and learning.
              • -
              • It helps instructors to prepare lectures, assignments, quizzes, and exams for their courses.
              • -
              • It helps students to check their answers, learn from their mistakes, and improve their understanding of the concepts and applications of chemical reaction engineering.
              • -
              • It covers a wide range of topics and challenges in chemical reaction engineering, such as reactor design, multiphase reactors, catalytic reactors, environmental reactors, biological reactors, and microreactors.
              • -
              • It reflects the latest developments and trends in chemical reaction engineering, such as materials processing, pharmaceuticals, foods, and renewable energy.
              • -
              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that offers many advantages for instructors and students of chemical reaction engineering.

              - -

              How to use The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that is easy to use and understand. To use this book, you need to follow these steps:

              - -
                -
              1. Read the corresponding chapter in the textbook The Engineering of Chemical Reactions by Lanny D. Schmidt.
              2. -
              3. Try to solve the problems in the chapter by yourself, using the concepts and methods explained in the textbook.
              4. -
              5. Compare your answers with the solutions in the book The Engineering Of Chemical Reactions L D Schmidt Solution.
              6. -
              7. If your answers are correct, move on to the next problem or chapter.
              8. -
              9. If your answers are incorrect or incomplete, study the solutions in the book The Engineering Of Chemical Reactions L D Schmidt Solution carefully, and try to understand where you went wrong and how to fix it.
              10. -
              11. Repeat the process until you master all the problems and topics in chemical reaction engineering.
              12. -
              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that is easy to use and understand.

              - -

              -

              What are the reviews of The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that has received positive feedback from both instructors and students of chemical reaction engineering. Some of the reviews are:

              - -
                -
              • "This book is a great companion to the textbook by Schmidt. It provides clear and detailed solutions to all the problems in the textbook, as well as some extra problems for more practice. It helps me to understand the concepts and applications of chemical reaction engineering better. I highly recommend it to anyone who wants to learn or teach chemical reaction engineering." - Student
              • -
              • "This book is a valuable resource for instructors who use the textbook by Schmidt in their courses. It saves me a lot of time and effort in preparing lectures, assignments, quizzes, and exams. It also helps me to explain the solutions to the students and answer their questions. It covers a wide range of topics and challenges in chemical reaction engineering, and reflects the latest developments and trends in the field. It is a must-have for any instructor of chemical reaction engineering." - Instructor
              • -
              • "This book is a useful tool for students who want to check their answers or learn from the solutions. It provides complete and accurate solutions to all the problems in the textbook, as well as some additional problems for extra practice and learning. It helps me to improve my problem-solving skills and confidence in chemical reaction engineering. It also helps me to prepare for exams and assignments in chemical reaction engineering courses. It is a great supplement to the textbook by Schmidt." - Student
              • -
              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that has received positive feedback from both instructors and students of chemical reaction engineering.

              - -

              What are the alternatives to The Engineering Of Chemical Reactions L D Schmidt Solution?

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that is designed to accompany the textbook The Engineering of Chemical Reactions by Lanny D. Schmidt. However, there are some other books that can also be used as alternatives or supplements to this book. Some of these books are:

              - -
                -
              • Elements of Chemical Reaction Engineering by H. Scott Fogler: This book is another popular and comprehensive textbook on chemical reaction engineering. It covers both the fundamentals and the advanced topics of chemical reaction engineering, such as mole balances, rate laws, stoichiometry, multiple reactions, energy balances, conversion and reactor sizing, collection and analysis of rate data, multiple reactions in ideal reactors, nonideal flow models, residence time distributions, nonisothermal reactor design, catalysis and catalytic reactors, external diffusion effects on heterogeneous reactions, diffusion and reaction in porous catalysts, multiphase reactor design, biochemical reactors, polymerization reactors, unsteady-state nonisothermal reactor design, membrane reactors, microreactors, etc. The book also includes numerous examples, problems, case studies, interactive computer modules (POLYMATH), CD-ROMs (COMSOL Multiphysics), web modules (WileyPLUS), etc.
              • -
              • Chemical Reaction Engineering by Octave Levenspiel: This book is another classic and widely used textbook on chemical reaction engineering. It covers both the fundamentals and the advanced topics of chemical reaction engineering, such as kinetics of homogeneous reactions, interpretation of batch reactor data, introduction to reactor design (ideal reactors), design for single reactions (size reactors either for maximum profit or minimum cost), multiple reactions (selectivity considerations), temperature effects (heat effects), heat transfer (design reactors with heat exchange), mass transfer (mass transfer with chemical reaction), heterogeneous reactions (solid catalyzed reactions), fluid-fluid reactions (gas-liquid reactions), fluid-solid reactions (noncatalytic heterogeneous reactions), biochemical reactions (enzyme kinetics), etc. The book also includes numerous examples, problems, case studies, etc.
              • -
              • Chemical Reactor Analysis and Design Fundamentals by James B. Rawlings and John G. Ekerdt: This book is another modern and comprehensive textbook on chemical reaction engineering. It covers both the fundamentals and the advanced topics of chemical reaction engineering, such as mole balances on batch processes (batch reactor design equations), mole balances on continuous processes (ideal reactor models), rate laws (reaction kinetics), stoichiometry (reaction stoichiometry), energy balances (energy balance equations), parameter estimation (regression analysis), multiple steady states (bifurcation analysis), multiple reactions (reaction network analysis), nonideal reactors (dispersion model), heterogeneous catalysis (surface chemistry), diffusion limitations in heterogeneous catalysis (effectiveness factors), fluid-fluid reactions with microorganisms (bioreactor design equations), etc. The book also includes numerous examples, -problems, case studies, MATLAB codes (FEMLAB), etc.
              • -
              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that is designed to accompany the textbook The Engineering of Chemical Reactions by Lanny D. Schmidt. However, there are some other books that can also be used as alternatives or supplements to this book.

              -

              Conclusion

              - -

              The Engineering Of Chemical Reactions L D Schmidt Solution is a book that contains the solutions to all the problems in the textbook The Engineering of Chemical Reactions by Lanny D. Schmidt. The book is intended for instructors who use the textbook in their courses, but it can also be useful for students who want to check their answers or learn from the solutions.

              - -

              The book covers both the fundamentals and the advanced topics of chemical reaction engineering, such as reactor design, multiphase reactors, catalytic reactors, environmental reactors, biological reactors, and microreactors. The book also includes numerous examples, problems, and case studies to illustrate the concepts and applications of chemical reaction engineering.

              - -

              The book is written by Lanny D. Schmidt himself, who is a professor emeritus of chemical engineering and materials science at the University of Minnesota. He has over 40 years of experience in teaching and research in chemical reaction engineering, and has authored or co-authored over 300 publications and 10 books on the subject.

              - -

              If you want to learn or teach chemical reaction engineering effectively and efficiently, you need The Engineering Of Chemical Reactions L D Schmidt Solution - a valuable resource for chemical engineers.

              3cee63e6c2
              -
              -
              \ No newline at end of file diff --git a/spaces/sussahoo/table_extraction/app.py b/spaces/sussahoo/table_extraction/app.py deleted file mode 100644 index 2d25aec18e0487637ada74e949c435f5620e3b2b..0000000000000000000000000000000000000000 --- a/spaces/sussahoo/table_extraction/app.py +++ /dev/null @@ -1,480 +0,0 @@ -from PIL import Image, ImageEnhance, ImageOps -import string -from collections import Counter -from itertools import tee, count -import pytesseract -from pytesseract import Output -import json -import pandas as pd - -# import matplotlib.pyplot as plt -import cv2 -import numpy as np -from transformers import DetrFeatureExtractor -from transformers import TableTransformerForObjectDetection -import torch -import gradio as gr -import pdf2image - - -def plot_results_detection( - model, image, prob, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax -): - plt.imshow(image) - ax = plt.gca() - - for p, (xmin, ymin, xmax, ymax) in zip(prob, bboxes_scaled.tolist()): - cl = p.argmax() - xmin, ymin, xmax, ymax = ( - xmin - delta_xmin, - ymin - delta_ymin, - xmax + delta_xmax, - ymax + delta_ymax, - ) - ax.add_patch( - plt.Rectangle( - (xmin, ymin), - xmax - xmin, - ymax - ymin, - fill=False, - color="red", - linewidth=3, - ) - ) - text = f"{model.config.id2label[cl.item()]}: {p[cl]:0.2f}" - ax.text( - xmin - 20, - ymin - 50, - text, - fontsize=10, - bbox=dict(facecolor="yellow", alpha=0.5), - ) - plt.axis("off") - - -def crop_tables(pil_img, prob, boxes, delta_xmin, delta_ymin, delta_xmax, delta_ymax): - """ - crop_tables and plot_results_detection must have same co-ord shifts because 1 only plots the other one updates co-ordinates - """ - cropped_img_list = [] - - for p, (xmin, ymin, xmax, ymax) in zip(prob, boxes.tolist()): - - xmin, ymin, xmax, ymax = ( - xmin - delta_xmin, - ymin - delta_ymin, - xmax + delta_xmax, - ymax + delta_ymax, - ) - cropped_img = pil_img.crop((xmin, ymin, xmax, ymax)) - cropped_img_list.append(cropped_img) - return cropped_img_list - - -def add_padding(pil_img, top, right, bottom, left, color=(255, 255, 255)): - """ - Image padding as part of TSR pre-processing to prevent missing table edges - """ - width, height = pil_img.size - new_width = width + right + left - new_height = height + top + bottom - result = Image.new(pil_img.mode, (new_width, new_height), color) - result.paste(pil_img, (left, top)) - return result - - -def table_detector(image, THRESHOLD_PROBA): - """ - Table detection using DEtect-object TRansformer pre-trained on 1 million tables - """ - - feature_extractor = DetrFeatureExtractor(do_resize=True, size=800, max_size=800) - encoding = feature_extractor(image, return_tensors="pt") - - model = TableTransformerForObjectDetection.from_pretrained( - "microsoft/table-transformer-detection" - ) - - with torch.no_grad(): - outputs = model(**encoding) - - probas = outputs.logits.softmax(-1)[0, :, :-1] - keep = probas.max(-1).values > THRESHOLD_PROBA - - target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0) - postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes) - bboxes_scaled = postprocessed_outputs[0]["boxes"][keep] - - return (model, probas[keep], bboxes_scaled) - - -def table_struct_recog(image, THRESHOLD_PROBA): - """ - Table structure recognition using DEtect-object TRansformer pre-trained on 1 million tables - """ - - feature_extractor = DetrFeatureExtractor(do_resize=True, size=1000, max_size=1000) - encoding = feature_extractor(image, return_tensors="pt") - - model = 
TableTransformerForObjectDetection.from_pretrained( - "microsoft/table-transformer-structure-recognition" - ) - with torch.no_grad(): - outputs = model(**encoding) - - probas = outputs.logits.softmax(-1)[0, :, :-1] - keep = probas.max(-1).values > THRESHOLD_PROBA - - target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0) - postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes) - bboxes_scaled = postprocessed_outputs[0]["boxes"][keep] - - return (model, probas[keep], bboxes_scaled) - - -def generate_structure( - model, pil_img, prob, boxes, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom -): - colors = ["red", "blue", "green", "yellow", "orange", "violet"] - """ - Co-ordinates are adjusted here by 3 'pixels' - To plot table pillow image and the TSR bounding boxes on the table - """ - # plt.figure(figsize=(32,20)) - # plt.imshow(pil_img) - # ax = plt.gca() - rows = {} - cols = {} - idx = 0 - for p, (xmin, ymin, xmax, ymax) in zip(prob, boxes.tolist()): - - xmin, ymin, xmax, ymax = xmin, ymin, xmax, ymax - cl = p.argmax() - class_text = model.config.id2label[cl.item()] - text = f"{class_text}: {p[cl]:0.2f}" - # or (class_text == 'table column') - # if (class_text == 'table row') or (class_text =='table projected row header') or (class_text == 'table column'): - # ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,fill=False, color=colors[0], linewidth=2)) - # ax.text(xmin-10, ymin-10, text, fontsize=5, bbox=dict(facecolor='yellow', alpha=0.5)) - - if class_text == "table row": - rows["table row." + str(idx)] = ( - xmin, - ymin - expand_rowcol_bbox_top, - xmax, - ymax + expand_rowcol_bbox_bottom, - ) - if class_text == "table column": - cols["table column." + str(idx)] = ( - xmin, - ymin - expand_rowcol_bbox_top, - xmax, - ymax + expand_rowcol_bbox_bottom, - ) - - idx += 1 - - # plt.axis('on') - return rows, cols - - -def sort_table_featuresv2(rows: dict, cols: dict): - # Sometimes the header and first row overlap, and we need the header bbox not to have first row's bbox inside the headers bbox - rows_ = { - table_feature: (xmin, ymin, xmax, ymax) - for table_feature, (xmin, ymin, xmax, ymax) in sorted( - rows.items(), key=lambda tup: tup[1][1] - ) - } - cols_ = { - table_feature: (xmin, ymin, xmax, ymax) - for table_feature, (xmin, ymin, xmax, ymax) in sorted( - cols.items(), key=lambda tup: tup[1][0] - ) - } - - return rows_, cols_ - - -def individual_table_featuresv2(pil_img, rows: dict, cols: dict): - - for k, v in rows.items(): - xmin, ymin, xmax, ymax = v - cropped_img = pil_img.crop((xmin, ymin, xmax, ymax)) - rows[k] = xmin, ymin, xmax, ymax, cropped_img - - for k, v in cols.items(): - xmin, ymin, xmax, ymax = v - cropped_img = pil_img.crop((xmin, ymin, xmax, ymax)) - cols[k] = xmin, ymin, xmax, ymax, cropped_img - - return rows, cols - - -def object_to_cellsv2( - master_row: dict, - cols: dict, - expand_rowcol_bbox_top, - expand_rowcol_bbox_bottom, - padd_left, -): - """Removes redundant bbox for rows&columns and divides each row into cells from columns - Args: - Returns: - - """ - cells_img = {} - header_idx = 0 - row_idx = 0 - previous_xmax_col = 0 - new_cols = {} - new_master_row = {} - previous_ymin_row = 0 - new_cols = cols - new_master_row = master_row - ## Below 2 for loops remove redundant bounding boxes ### - # for k_col, v_col in cols.items(): - # xmin_col, _, xmax_col, _, col_img = v_col - # if (np.isclose(previous_xmax_col, xmax_col, atol=5)) or (xmin_col >= xmax_col): - # print('Found a column with double bbox') - # continue - # 
previous_xmax_col = xmax_col - # new_cols[k_col] = v_col - - # for k_row, v_row in master_row.items(): - # _, ymin_row, _, ymax_row, row_img = v_row - # if (np.isclose(previous_ymin_row, ymin_row, atol=5)) or (ymin_row >= ymax_row): - # print('Found a row with double bbox') - # continue - # previous_ymin_row = ymin_row - # new_master_row[k_row] = v_row - ###################################################### - for k_row, v_row in new_master_row.items(): - - _, _, _, _, row_img = v_row - xmax, ymax = row_img.size - xa, ya, xb, yb = 0, 0, 0, ymax - row_img_list = [] - # plt.imshow(row_img) - # st.pyplot() - for idx, kv in enumerate(new_cols.items()): - k_col, v_col = kv - xmin_col, _, xmax_col, _, col_img = v_col - xmin_col, xmax_col = xmin_col - padd_left - 10, xmax_col - padd_left - # plt.imshow(col_img) - # st.pyplot() - # xa + 3 : to remove borders on the left side of the cropped cell - # yb = 3: to remove row information from the above row of the cropped cell - # xb - 3: to remove borders on the right side of the cropped cell - xa = xmin_col - xb = xmax_col - if idx == 0: - xa = 0 - if idx == len(new_cols) - 1: - xb = xmax - xa, ya, xb, yb = xa, ya, xb, yb - - row_img_cropped = row_img.crop((xa, ya, xb, yb)) - row_img_list.append(row_img_cropped) - - cells_img[k_row + "." + str(row_idx)] = row_img_list - row_idx += 1 - - return cells_img, len(new_cols), len(new_master_row) - 1 - - -def pytess(cell_pil_img): - return " ".join( - pytesseract.image_to_data( - cell_pil_img, - output_type=Output.DICT, - config="-c tessedit_char_blacklist=œ˜â€œï¬â™Ã©œ¢!|”?«“¥ --psm 6 preserve_interword_spaces", - )["text"] - ).strip() - - -def uniquify(seq, suffs=count(1)): - """Make all the items unique by adding a suffix (1, 2, etc). - Credit: https://stackoverflow.com/questions/30650474/python-rename-duplicates-in-list-with-progressive-numbers-without-sorting-list - `seq` is mutable sequence of strings. - `suffs` is an optional alternative suffix iterable. 
- """ - not_unique = [k for k, v in Counter(seq).items() if v > 1] - - suff_gens = dict(zip(not_unique, tee(suffs, len(not_unique)))) - for idx, s in enumerate(seq): - try: - suffix = str(next(suff_gens[s])) - except KeyError: - continue - else: - seq[idx] += suffix - - return seq - - -def clean_dataframe(df): - """ - Remove irrelevant symbols that appear with tesseractOCR - """ - # df.columns = [col.replace('|', '') for col in df.columns] - - for col in df.columns: - - df[col] = df[col].str.replace("'", "", regex=True) - df[col] = df[col].str.replace('"', "", regex=True) - df[col] = df[col].str.replace("]", "", regex=True) - df[col] = df[col].str.replace("[", "", regex=True) - df[col] = df[col].str.replace("{", "", regex=True) - df[col] = df[col].str.replace("}", "", regex=True) - df[col] = df[col].str.replace("|", "", regex=True) - return df - - -def create_dataframe(cells_pytess_result: list, max_cols: int, max_rows: int, csv_path): - """Create dataframe using list of cell values of the table, also checks for valid header of dataframe - Args: - cells_pytess_result: list of strings, each element representing a cell in a table - max_cols, max_rows: number of columns and rows - Returns: - dataframe : final dataframe after all pre-processing - """ - - headers = cells_pytess_result[:max_cols] - new_headers = uniquify(headers, (f" {x!s}" for x in string.ascii_lowercase)) - counter = 0 - - cells_list = cells_pytess_result[max_cols:] - df = pd.DataFrame("", index=range(0, max_rows), columns=new_headers) - - cell_idx = 0 - for nrows in range(max_rows): - for ncols in range(max_cols): - df.iat[nrows, ncols] = str(cells_list[cell_idx]) - cell_idx += 1 - - ## To check if there are duplicate headers if result of uniquify+col == col - ## This check removes headers when all headers are empty or if median of header word count is less than 6 - for x, col in zip(string.ascii_lowercase, new_headers): - if f" {x!s}" == col: - counter += 1 - header_char_count = [len(col) for col in new_headers] - - # if (counter == len(new_headers)) or (statistics.median(header_char_count) < 6): - # st.write('woooot') - # df.columns = uniquify(df.iloc[0], (f' {x!s}' for x in string.ascii_lowercase)) - # df = df.iloc[1:,:] - - df = clean_dataframe(df) - # df.to_csv(csv_path) - - return df - -def postprocess_dataframes(result_tables): - """ - Normalize column names - """ - # df.columns = [col.replace('|', '') for col in df.columns] - res = {} - for idx, table_df in enumerate(result_tables): - result_df = pd.DataFrame() - for col in table_df.columns: - if col.lower().startswith("item"): - result_df["name"] = table_df[col].copy() - if ( - col.lower().startswith("total") - or col.lower().startswith("amount") - or col.lower().startswith("cost") - ): - result_df["amount"] = table_df[col].copy() - print(result_df.columns) - if len(result_df.columns) == 0: - result_df["name"] = table_df.iloc[:, 0].copy() - result_df["amount"] = table_df.iloc[:, 1].copy() - - result_df["cost_code"] = "" - res["Table1" + str(idx)] = result_df.to_json(orient="records") - return res - - -def process_image(image): - # if pdf: - # path_to_pdf = pdf.name - # # convert PDF to PIL images (one image by page) - # first_page=True # we want here only the first page as image - # if first_page: last_page = 1 - # else: last_page = None - # imgs = pdf2image.convert_from_path(path_to_pdf, last_page=last_page) - # image = imgs[0] - TD_THRESHOLD = 0.7 - TSR_THRESHOLD = 0.8 - padd_top = 100 - padd_left = 100 - padd_bottom = 100 - padd_right = 20 - delta_xmin = 0 - 
delta_ymin = 0 - delta_xmax = 0 - delta_ymax = 0 - expand_rowcol_bbox_top = 0 - expand_rowcol_bbox_bottom = 0 - - image = image.convert("RGB") - model, probas, bboxes_scaled = table_detector(image, THRESHOLD_PROBA=TD_THRESHOLD) - # plot_results_detection(model, image, probas, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax) - cropped_img_list = crop_tables( - image, probas, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax - ) - - result = [] - for idx, unpadded_table in enumerate(cropped_img_list): - table = add_padding( - unpadded_table, padd_top, padd_right, padd_bottom, padd_left - ) - model, probas, bboxes_scaled = table_struct_recog( - table, THRESHOLD_PROBA=TSR_THRESHOLD - ) - rows, cols = generate_structure( - model, - table, - probas, - bboxes_scaled, - expand_rowcol_bbox_top, - expand_rowcol_bbox_bottom, - ) - rows, cols = sort_table_featuresv2(rows, cols) - master_row, cols = individual_table_featuresv2(table, rows, cols) - cells_img, max_cols, max_rows = object_to_cellsv2( - master_row, - cols, - expand_rowcol_bbox_top, - expand_rowcol_bbox_bottom, - padd_left, - ) - sequential_cell_img_list = [] - for k, img_list in cells_img.items(): - for img in img_list: - sequential_cell_img_list.append(pytess(img)) - - csv_path = "/content/sample_data/table_" + str(idx) - df = create_dataframe(sequential_cell_img_list, max_cols, max_rows, csv_path) - result.append(df) - output = postprocess_dataframes(result) - return output - - -title = "Interactive demo OCR: microsoft - table-transformer-detection + tesseract" -description = "Demo for microsoft - table-transformer-detection + tesseract" -article = "

              " -examples = [["image_0.png"]] - -iface = gr.Interface( - fn=process_image, - inputs=gr.Image(type="pil"), - outputs="text", - title=title, - description=description, - article=article, - examples=examples, -) -iface.launch(debug=False) \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/util.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/util.py deleted file mode 100644 index 6f91ae0e65abaf0cbd62d803f56498991141e61b..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/util.py +++ /dev/null @@ -1,164 +0,0 @@ -import math -import numpy as np -import matplotlib -import cv2 - - -def padRightDownCorner(img, stride, padValue): - h = img.shape[0] - w = img.shape[1] - - pad = 4 * [None] - pad[0] = 0 # up - pad[1] = 0 # left - pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down - pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right - - img_padded = img - pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) - img_padded = np.concatenate((pad_up, img_padded), axis=0) - pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) - img_padded = np.concatenate((pad_left, img_padded), axis=1) - pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) - img_padded = np.concatenate((img_padded, pad_down), axis=0) - pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) - img_padded = np.concatenate((img_padded, pad_right), axis=1) - - return img_padded, pad - -# transfer caffe model to pytorch which will match the layer name -def transfer(model, model_weights): - transfered_model_weights = {} - for weights_name in model.state_dict().keys(): - transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] - return transfered_model_weights - -# draw the body keypoint and lims -def draw_bodypose(canvas, candidate, subset): - stickwidth = 4 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ - [1, 16], [16, 18], [3, 17], [6, 18]] - - colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ - [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ - [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] - for i in range(18): - for n in range(len(subset)): - index = int(subset[n][i]) - if index == -1: - continue - x, y = candidate[index][0:2] - cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) - for i in range(17): - for n in range(len(subset)): - index = subset[n][np.array(limbSeq[i]) - 1] - if -1 in index: - continue - cur_canvas = canvas.copy() - Y = candidate[index.astype(int), 0] - X = candidate[index.astype(int), 1] - mX = np.mean(X) - mY = np.mean(Y) - length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 - angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) - polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) - cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) - canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) - # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]]) - # plt.imshow(canvas[:, :, [2, 1, 0]]) - return canvas - - -# image drawed by opencv is not good. 
-def draw_handpose(canvas, all_hand_peaks, show_number=False): - edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ - [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] - - for peaks in all_hand_peaks: - for ie, e in enumerate(edges): - if np.sum(np.all(peaks[e], axis=1)==0)==0: - x1, y1 = peaks[e[0]] - x2, y2 = peaks[e[1]] - cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2) - - for i, keyponit in enumerate(peaks): - x, y = keyponit - cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) - if show_number: - cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA) - return canvas - -# detect hand according to body pose keypoints -# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp -def handDetect(candidate, subset, oriImg): - # right hand: wrist 4, elbow 3, shoulder 2 - # left hand: wrist 7, elbow 6, shoulder 5 - ratioWristElbow = 0.33 - detect_result = [] - image_height, image_width = oriImg.shape[0:2] - for person in subset.astype(int): - # if any of three not detected - has_left = np.sum(person[[5, 6, 7]] == -1) == 0 - has_right = np.sum(person[[2, 3, 4]] == -1) == 0 - if not (has_left or has_right): - continue - hands = [] - #left hand - if has_left: - left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]] - x1, y1 = candidate[left_shoulder_index][:2] - x2, y2 = candidate[left_elbow_index][:2] - x3, y3 = candidate[left_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, True]) - # right hand - if has_right: - right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]] - x1, y1 = candidate[right_shoulder_index][:2] - x2, y2 = candidate[right_elbow_index][:2] - x3, y3 = candidate[right_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, False]) - - for x1, y1, x2, y2, x3, y3, is_left in hands: - # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox - # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]); - # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]); - # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow); - # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder); - # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder); - x = x3 + ratioWristElbow * (x3 - x2) - y = y3 + ratioWristElbow * (y3 - y2) - distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) - distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) - width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) - # x-y refers to the center --> offset to topLeft point - # handRectangle.x -= handRectangle.width / 2.f; - # handRectangle.y -= handRectangle.height / 2.f; - x -= width / 2 - y -= width / 2 # width = height - # overflow the image - if x < 0: x = 0 - if y < 0: y = 0 - width1 = width - width2 = width - if x + width > image_width: width1 = image_width - x - if y + width > image_height: width2 = image_height - y - width = min(width1, width2) - # the max hand box value is 20 pixels - if width >= 20: - detect_result.append([int(x), int(y), int(width), is_left]) - - ''' - return value: [[x, y, w, True if left hand else False]]. 
- width=height since the network require squared input. - x, y is the coordinate of top left - ''' - return detect_result - -# get max index of 2d array -def npmax(array): - arrayindex = array.argmax(1) - arrayvalue = array.max(1) - i = arrayvalue.argmax() - j = arrayindex[i] - return i, j diff --git a/spaces/syaz01/rvc-anigames-v2/lib/infer_pack/attentions.py b/spaces/syaz01/rvc-anigames-v2/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000 --- a/spaces/syaz01/rvc-anigames-v2/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - 
self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/talhaty/Faceswapper/roop/metadata.py b/spaces/talhaty/Faceswapper/roop/metadata.py deleted file mode 100644 index 35b0f0245a38eb9ec024f2ed2c829044f6051c29..0000000000000000000000000000000000000000 --- a/spaces/talhaty/Faceswapper/roop/metadata.py +++ /dev/null @@ -1,2 +0,0 @@ -name = 'roop' -version = '1.1.0' diff --git a/spaces/tarjomeh/Norod78-sd2-cartoon-blip/README.md b/spaces/tarjomeh/Norod78-sd2-cartoon-blip/README.md deleted file mode 100644 index 0675cbeeb6210b35a15da6d8e1e89b01476eff1a..0000000000000000000000000000000000000000 --- a/spaces/tarjomeh/Norod78-sd2-cartoon-blip/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Norod78 Sd2 Cartoon Blip -emoji: 💩 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/teragron/TinyStories/win.c b/spaces/teragron/TinyStories/win.c deleted file mode 100644 index 0b295cd1f3356696a9cc1ee129a945633f6069af..0000000000000000000000000000000000000000 --- a/spaces/teragron/TinyStories/win.c +++ /dev/null @@ -1,180 +0,0 @@ -#include "win.h" -#include -#include - -#ifndef FILE_MAP_EXECUTE -#define FILE_MAP_EXECUTE 0x0020 -#endif /* FILE_MAP_EXECUTE */ - -static int __map_mman_error(const uint32_t err, const int deferr) -{ - if (err == 0) - return 0; - //TODO: implement - return err; -} - -static uint32_t __map_mmap_prot_page(const int prot) -{ - uint32_t protect = 0; - - if (prot == PROT_NONE) - return protect; - - if ((prot & PROT_EXEC) != 0) - { - protect = ((prot & PROT_WRITE) != 0) ? - PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; - } - else - { - protect = ((prot & PROT_WRITE) != 0) ? 
- PAGE_READWRITE : PAGE_READONLY; - } - - return protect; -} - -static uint32_t __map_mmap_prot_file(const int prot) -{ - uint32_t desiredAccess = 0; - - if (prot == PROT_NONE) - return desiredAccess; - - if ((prot & PROT_READ) != 0) - desiredAccess |= FILE_MAP_READ; - if ((prot & PROT_WRITE) != 0) - desiredAccess |= FILE_MAP_WRITE; - if ((prot & PROT_EXEC) != 0) - desiredAccess |= FILE_MAP_EXECUTE; - - return desiredAccess; -} - -void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off) -{ - HANDLE fm, h; - void * map = MAP_FAILED; - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable: 4293) -#endif - - const uint32_t dwFileOffsetLow = (uint32_t)(off & 0xFFFFFFFFL); - const uint32_t dwFileOffsetHigh = (uint32_t)((off >> 32) & 0xFFFFFFFFL); - const uint32_t protect = __map_mmap_prot_page(prot); - const uint32_t desiredAccess = __map_mmap_prot_file(prot); - - const ssize_t maxSize = off + (ssize_t)len; - - const uint32_t dwMaxSizeLow = (uint32_t)(maxSize & 0xFFFFFFFFL); - const uint32_t dwMaxSizeHigh = (uint32_t)((maxSize >> 32) & 0xFFFFFFFFL); - -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - errno = 0; - - if (len == 0 - /* Unsupported flag combinations */ - || (flags & MAP_FIXED) != 0 - /* Usupported protection combinations */ - || prot == PROT_EXEC) - { - errno = EINVAL; - return MAP_FAILED; - } - - h = ((flags & MAP_ANONYMOUS) == 0) ? - (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE; - - if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) - { - errno = EBADF; - return MAP_FAILED; - } - - fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL); - - if (fm == NULL) - { - errno = __map_mman_error(GetLastError(), EPERM); - return MAP_FAILED; - } - - map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len); - - CloseHandle(fm); - - if (map == NULL) - { - errno = __map_mman_error(GetLastError(), EPERM); - return MAP_FAILED; - } - - return map; -} - -int munmap(void *addr, size_t len) -{ - if (UnmapViewOfFile(addr)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; -} - -int mprotect(void *addr, size_t len, int prot) -{ - uint32_t newProtect = __map_mmap_prot_page(prot); - uint32_t oldProtect = 0; - - if (VirtualProtect(addr, len, newProtect, &oldProtect)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; -} - -int msync(void *addr, size_t len, int flags) -{ - if (FlushViewOfFile(addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; -} - -int mlock(const void *addr, size_t len) -{ - if (VirtualLock((LPVOID)addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; -} - -int munlock(const void *addr, size_t len) -{ - if (VirtualUnlock((LPVOID)addr, len)) - return 0; - - errno = __map_mman_error(GetLastError(), EPERM); - - return -1; -} - -// Portable clock_gettime function for Windows -int clock_gettime(int clk_id, struct timespec *tp) { - uint32_t ticks = GetTickCount(); - tp->tv_sec = ticks / 1000; - tp->tv_nsec = (ticks % 1000) * 1000000; - return 0; -} diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Valkyrie 2008 In Dual Audio Eng Hind) UPD.md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Valkyrie 2008 In Dual Audio Eng Hind) UPD.md deleted file mode 100644 index 960e290ff81a56769948983bbc705e009fa19462..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Valkyrie 2008 In Dual Audio 
Eng Hind) UPD.md +++ /dev/null @@ -1,23 +0,0 @@ -
              -

              How to Watch Valkyrie (2008) in Dual Audio Online

              -

              Valkyrie is a historical thriller film that depicts the 20 July plot in 1944 by German army officers to assassinate Adolf Hitler and overthrow the Nazi regime. The film stars Tom Cruise as Colonel Claus von Stauffenberg, one of the key plotters. The film was directed by Bryan Singer and released in 2008.

              -

              HD Online Player (Valkyrie 2008 In Dual Audio Eng Hind)


              Download File ••• https://bytlly.com/2uGm4y



              -

              If you want to watch Valkyrie in dual audio (English and Hindi), you have a few options. One of them is to use an online player that supports dual audio streaming. Here are some steps to follow:

              -
                -
              1. Find a reliable website that offers Valkyrie in dual audio. You can use a search engine like Bing to look for keywords like "Valkyrie 2008 In Dual Audio Eng Hindi" or "Valkyrie 2008 dual audio online". Some of the websites that may have the film are player.fm[^1^], kragomicanyz.wixsite.com[^2^], and patronway.com[^3^]. Be careful of websites that may contain malware or viruses.
              2. -
              3. Select the website that has the best quality and speed of streaming. You may need to create an account or register on some websites before you can watch the film.
              4. -
              5. Choose the online player that suits your device and preferences. Some online players may require you to download software or plugins to enable dual audio streaming. Some examples of online players are VLC Media Player, KMPlayer, and GOM Player.
              6. -
              7. Adjust the audio settings on the online player to select the language you want to hear. You can switch between English and Hindi or play both languages simultaneously. You may also need to adjust the subtitles settings if you want to see them on the screen.
              8. -
              9. Enjoy watching Valkyrie in dual audio online!
              10. -
              -

Valkyrie is a fascinating film that explores a historical event that could have changed the course of World War II. By watching it in dual audio, you can enjoy it in two languages and appreciate the performances of the actors. I hope this article helps you find and use an online player to watch Valkyrie in dual audio.

              - -

              What Critics and Audiences Say About Valkyrie

              -

Valkyrie received mixed reviews from critics and audiences when it was released in 2008. The film has a 62% rating on Rotten Tomatoes based on 198 reviews, with an average score of 6.2/10. The website's critical consensus reads: "Given the subject matter, Valkyrie could have been an outstanding historical thriller, but settles for being a mildly entertaining, but disposable yarn."

              -

Some critics praised the film's suspense, direction, and performances, especially by Cruise and the supporting cast. Roger Ebert gave the film three and a half stars out of four, writing: "This is a film about veterans of officer rank, with all the reserve and probity that officers gather on the way up. They do not scream or hurry and do not care to be seen that way. They have learned not to panic under fire, and they have never been more under fire than now."[^1^]

              -

Other critics criticized the film's lack of emotional depth, historical accuracy, and authenticity. Peter Bradshaw of The Guardian gave the film two stars out of five, writing: "It's a bit like watching a second world war version of Ocean's Eleven ... Cruise himself is oddly blank and affectless as Stauffenberg; he doesn't give us much idea of his personality or his motivation."

              -

              -

              The film also faced some controversy over its casting, production, and release. Some German politicians and veterans objected to Cruise playing von Stauffenberg because of his affiliation with Scientology, which is considered a cult in Germany. Some historians and critics questioned the film's portrayal of von Stauffenberg and the other plotters as heroes, arguing that they were motivated by self-interest and opportunism rather than moral principles. The film's release date was also delayed several times due to reshoots, editing issues, and marketing challenges.

              -

              The film grossed $200.3 million worldwide against a budget of $75 million, making it a moderate box office success. The film received two Academy Award nominations for Best Sound Editing and Best Sound Mixing.

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/theekshana/boardpac_chat_app_test/app2.py b/spaces/theekshana/boardpac_chat_app_test/app2.py deleted file mode 100644 index 24e89122674ad778d23f021eea11bd443981d224..0000000000000000000000000000000000000000 --- a/spaces/theekshana/boardpac_chat_app_test/app2.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Python Backend API to chat with private data - -08/16/2023 -D.M. Theekshana Samaradiwakara -""" - -import os -import time -import streamlit as st -from streamlit.logger import get_logger - -logger = get_logger(__name__) - -from ui.htmlTemplates import css, bot_template, user_template, source_template -from config import MODELS, DATASETS - -from qaPipeline import QAPipeline -from faissDb import create_faiss - -# loads environment variables -from dotenv import load_dotenv -load_dotenv() - -isHuggingFaceHubEnabled = os.environ.get('ENABLE_HUGGINGFSCE_HUB_MODELS') -isOpenAiApiEnabled = os.environ.get('ENABLE_OPENAI_API_MODELS') - -st.set_page_config(page_title="Chat with data", - page_icon=":books:") -st.write(css, unsafe_allow_html=True) - - -SESSION_DEFAULTS = { - "model": MODELS["DEFAULT"], - "dataset": DATASETS["DEFAULT"], - "chat_history": None, - "is_parameters_changed":False, - "show_source_files": False, - "user_question":'', -} - -for k, v in SESSION_DEFAULTS.items(): - if k not in st.session_state: - st.session_state[k] = v - - -with st.sidebar: - st.subheader("Chat parameters") - - with st.form('param_form'): - - chat_model = st.selectbox( - "Chat model", - MODELS, - key="chat_model", - help="Select the LLM model for the chat", - # on_change=update_parameters_change, - ) - - st.session_state.dataset = "DEFAULT" - - show_source = st.checkbox( - label="show source files", - key="show_source", - help="Select this to show relavant source files for the query", - ) - - submitted = st.form_submit_button( - "Submit", - # on_click=parameters_change_button, - # args=[chat_model, show_source] - ) - - # submitted = st.button( - # "Submit", - # # on_click=parameters_change_button, - # # args=[chat_model, show_source] - # ) - - if submitted: - st.session_state.model = chat_model - st.session_state.dataset = "DEFAULT" - st.session_state.show_source_files = show_source - st.session_state.is_parameters_changed = False - - alert = st.success("chat parameters updated") - time.sleep(1) # Wait for 3 seconds - alert.empty() # Clear the alert - - st.markdown("\n") - - # if st.button("Create FAISS db"): - # try: - # with st.spinner('creating faiss vector store'): - # create_faiss() - # st.success('faiss saved') - # except Exception as e: - # st.error(f"Error : {e}")#, icon=":books:") - # return - - st.markdown( - "### How to use\n" - "1. Select the chat model\n" # noqa: E501 - "2. Select \"show source files\" to show the source files related to the answer.📄\n" - "3. 
Ask a question about the documents💬\n" - ) - - - -st.header("Chat with your own data:") -@st.experimental_singleton # 👈 Add the caching decorator -def load_QaPipeline(): - print('> QAPipeline loaded for front end') - return QAPipeline() - -qaPipeline = load_QaPipeline() -# qaPipeline = QAPipeline() -with st.form('chat_body'): - - - user_question = st.text_input( - "Ask a question about your documents:", - placeholder="enter question", - key='user_question', - # on_change=submit_user_question, - ) - - submitted = st.form_submit_button( - "Submit", - # on_click=submit_user_question - ) - - if submitted: - with st.spinner("Processing"): - user_question = st.session_state.user_question - # st.success(user_question) - query = user_question - # st.session_state.user_question='' - - # Get the answer from the chain - try: - if (not query) or (query.strip() == ''): - st.error("Please enter a question!") - st.stop() - - model = MODELS[st.session_state.model] - dataset = DATASETS[st.session_state.dataset] - show_source_files = st.session_state.show_source_files - - # Try to access openai and deeplake - print(f">\n model: {model} \n dataset : {dataset} \n show_source_files : {show_source_files}") - - # response = qaPipeline.run(query=query, model=model, dataset=dataset) - response = qaPipeline.run_agent(query=query, model=model, dataset=dataset) - - - docs = [] - if isinstance(response, dict): - answer, docs = response['answer'], response['source_documents'] - else: - answer = response - - st.write(user_template.replace( - "{{MSG}}", query), unsafe_allow_html=True) - st.write(bot_template.replace( - "{{MSG}}", answer ), unsafe_allow_html=True) - - if show_source_files: - # st.write(source_template.replace( - # "{{MSG}}", "source files" ), unsafe_allow_html=True) - - if len(docs)>0 : - st.markdown("#### source files : ") - for source in docs: - # st.info(source.metadata) - with st.expander(source.metadata["source"]): - st.markdown(source.page_content) - - # st.write(response) - - except Exception as e: - # logger.error(f"Answer retrieval failed with {e}") - st.error(f"Error : {e}")#, icon=":books:") - diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bitcoin Made Easy Video Tutorials for Every Level of Experience.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bitcoin Made Easy Video Tutorials for Every Level of Experience.md deleted file mode 100644 index 927a9cd3feeec76768fb05a25da592683d3526b0..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bitcoin Made Easy Video Tutorials for Every Level of Experience.md +++ /dev/null @@ -1,204 +0,0 @@ -
              -

              How to Download Bitcoin Tutorial Videos from YouTube

              -

              Bitcoin is a cryptocurrency, a digital form of money that can be used to buy, sell, and transfer value online without the need for a central authority or intermediary. Bitcoin is also a protocol, a set of rules and technologies that enable the creation, distribution, and verification of transactions on a decentralized network called the blockchain. Bitcoin is one of the most innovative and disruptive inventions of the 21st century, and many people are interested in learning more about how it works and how to use it.

              -

              One of the best ways to learn about bitcoin is to watch tutorial videos on YouTube, where you can find hundreds of hours of content from experts, enthusiasts, and educators. Whether you want to learn the basics of bitcoin, how to set up a wallet, how to buy and sell bitcoin, how to mine bitcoin, or how to use bitcoin for various purposes, you can find a video that suits your needs and preferences.

              -

              download bitcoin tutorial video


              Download Zip ⚙⚙⚙ https://bltlly.com/2uOlxh



              -

              But what if you want to download these videos to your computer or mobile device, so you can watch them offline, at your own pace, or without ads? Is it possible, legal, and ethical to do so? In this article, we will answer these questions and show you five methods to download bitcoin tutorial videos from YouTube.

              -

              How to Download YouTube Videos

              -

Before we get into the specific methods, let's address some important issues regarding downloading YouTube videos. First of all, is it legal? The answer is not clear-cut, as it depends on the jurisdiction you live in, the terms of service of YouTube, and the copyright status of the video. Generally speaking, downloading YouTube videos for your own personal offline use is not illegal, but it may violate Google's terms of service, which state that "You are not allowed to...access, reproduce, download, distribute, transmit, broadcast, display, sell, license, alter, modify or otherwise use any part of the Service or any Content except: (a) as expressly authorized by the Service; or (b) with prior written permission from YouTube and, if applicable, the respective rights holders."

              -

              Secondly, is it ethical? This is a matter of personal judgment and opinion. Some might argue that downloading YouTube videos is fair use, as long as you do not redistribute them or claim them as your own. Others might argue that downloading YouTube videos is stealing from the creators and depriving them of revenue from ads or subscriptions. Ultimately, you have to decide for yourself whether you are comfortable with downloading YouTube videos or not.

              -

              Thirdly, how do you do it? There are many tools and services that can help you download YouTube videos to your computer or mobile device. Some are free and open source; others are paid and proprietary. Some are standalone applications; others are web-based or browser extensions. Some are easy and user-friendly; others are complex and technical. Some are safe and reliable; others are risky and malware-infected. You have to be careful when choosing a tool or service to download YouTube videos.

              -

In general, the steps involved in downloading YouTube videos are as follows (a small scripted sketch of the same flow appears right after the list):

              -

              How to download bitcoin tutorial video for free
              -Best bitcoin tutorial video download sites
              -Download bitcoin tutorial video from Khan Academy
              -Bitcoin for beginners: Learn from experienced trader video download
              -Download bitcoin crash course video by Petko Zhivkov Aleksandrov
              -Bitcoin trading and investing video download
              -How to get started with bitcoin video download
              -Download bitcoin wallet setup video
              -Bitcoin basics: What is bitcoin and how does it work video download
              -Download video on how to buy and sell bitcoin
              -Bitcoin mining tutorial video download
              -Download video on how to secure your bitcoin wallet
              -Bitcoin transactions explained video download
              -Download video on how to use bitcoin for online payments
              -Bitcoin privacy and anonymity video download
              -Download video on how to earn bitcoin online
              -Bitcoin tax and legal issues video download
              -Download video on how to avoid bitcoin scams and frauds
              -Bitcoin futures and options trading video download
              -Download video on how to trade bitcoin with leverage
              -Bitcoin technical analysis and charting video download
              -Download video on how to use bitcoin indicators and signals
              -Bitcoin trading strategies and tips video download
              -Download video on how to manage your bitcoin risk and portfolio
              -Bitcoin trading psychology and mindset video download
              -Download video on how to join a bitcoin community and network
              -Bitcoin news and updates video download
              -Download video on how to follow bitcoin influencers and experts
              -Bitcoin podcasts and webinars video download
              -Download video on how to learn from bitcoin success stories and case studies
              -Bitcoin history and evolution video download
              -Download video on how to understand bitcoin economics and supply and demand
              -Bitcoin blockchain technology and innovation video download
              -Download video on how to explore bitcoin applications and use cases
              -Bitcoin social impact and environmental issues video download
              -Download video on how to compare bitcoin with other cryptocurrencies
              -Bitcoin regulation and compliance video download
              -Download video on how to participate in bitcoin governance and voting
              -Bitcoin innovation and development roadmap video download
              -Download video on how to contribute to bitcoin open source projects
              -Bitcoin challenges and limitations video download
              -Download video on how to overcome bitcoin technical difficulties and errors
              -Bitcoin hacks and security breaches video download
              -Download video on how to recover your lost or stolen bitcoin
              -Bitcoin forks and upgrades video download
              -Download video on how to switch between different bitcoin versions and networks
              -Bitcoin alternatives and competitors video download
              -Download video on how to diversify your cryptocurrency portfolio with other coins
              -Bitcoin integration and interoperability with other systems and platforms video download

              -
                -
              1. Find a YouTube video that you want to download.
              2. -
              3. Copy the URL (web address) of the video.
              4. -
              5. Paste the URL into a tool or service that can download YouTube videos.
              6. -
              7. Select your preferred format, quality, and location for the downloaded video.
              8. -
              9. Click the download button and wait for the process to complete.
              10. -
              -
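For readers who prefer a scriptable route, the same find-URL-then-download flow can be automated. This is only a rough sketch and is not one of the five methods reviewed below: it assumes the third-party open-source yt-dlp package is installed (pip install yt-dlp), and the video URL and output filename pattern are placeholders rather than values from this article. The same legal and terms-of-service caveats discussed above apply.

```python
# Minimal sketch of the generic download flow using the yt-dlp Python API.
# Assumptions: yt-dlp is installed; the URL below is a placeholder.
from yt_dlp import YoutubeDL

video_url = "https://www.youtube.com/watch?v=EXAMPLE_ID"  # hypothetical video URL

options = {
    "format": "mp4",                 # preferred format/container
    "outtmpl": "%(title)s.%(ext)s",  # save as "<video title>.<extension>"
}

with YoutubeDL(options) as downloader:
    downloader.download([video_url])  # fetch the video and write it to disk
```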

              In this article, we will show you five methods that we have tested and found to be effective in downloading bitcoin tutorial videos from YouTube. We will also provide some pros and cons for each method. Note that these methods may not work for all videos or in all regions. Also note that these methods may change over time as YouTube updates its policies or algorithms.Method 1: Using Open Video Downloader on a computer -

              Open Video Downloader is a free and open source software that can download videos from YouTube and other websites. It is available for Windows, Mac, and Linux operating systems. Here are the steps to use Open Video Downloader to download bitcoin tutorial videos from YouTube:

              -
                -
1. Download and install Open Video Downloader from its official website.
              2. -
              3. Launch the program and click the "Add" button on the top left corner.
              4. -
              5. Paste the URL of the YouTube video that you want to download and click "OK".
              6. -
              7. Select your desired format and quality from the drop-down menus. You can also choose to download subtitles, audio only, or video only.
              8. -
              9. Click the "Download" button on the bottom right corner and choose a location to save the downloaded video.
              10. -
              11. Wait for the download to finish and enjoy your video.
              12. -
              -

              Pros:

              -
                -
              • It is free and open source, which means you can trust its code and modify it if you want.
              • -
              • It supports multiple formats and qualities, including 4K and 8K resolutions.
              • -
              • It can download subtitles, audio only, or video only, which can save bandwidth and storage space.
              • -
              -

              Cons:

              -
                -
              • It requires installation, which may not be convenient for some users.
              • -
              • It may not work for some videos that are protected by DRM (digital rights management) or geo-restrictions.
              • -
              • It may not be updated frequently, which may affect its compatibility with YouTube's changes.
              • -
              -

              Method 2: Using VLC Player on a computer

              -

              VLC Player is a popular and versatile media player that can play almost any video or audio file. It can also be used to download videos from YouTube and other websites. It is available for Windows, Mac, Linux, Android, iOS, and other operating systems. Here are the steps to use VLC Player to download bitcoin tutorial videos from YouTube:

              -
                -
1. Download and install VLC Player from its official website.
              2. -
              3. Launch the program and click the "Media" menu on the top left corner.
              4. -
              5. Select "Open Network Stream" and paste the URL of the YouTube video that you want to download.
              6. -
              7. Click "Play" and wait for the video to load.
              8. -
              9. Right-click on the video and select "Tools" then "Codec Information".
              10. -
              11. Copy the URL in the "Location" box at the bottom of the window.
              12. -
              13. Paste the URL into your browser's address bar and press enter.
              14. -
              15. The video will start playing in your browser. Right-click on it and select "Save video as".
              16. -
              17. Choose a location to save the downloaded video and click "Save".
              18. -
              19. Wait for the download to finish and enjoy your video.
              20. -
              -

              Pros:

              -
                -
              • It is free and open source, which means you can trust its code and modify it if you want.
              • -
              • It does not require any additional software or service to download YouTube videos.
              • -
              • It can play almost any video or audio file, which makes it a handy media player.
              • -
              -

              Cons:

              -
                -
              • It is a bit complicated and tedious to use for downloading YouTube videos.
              • -
              • It may not work for some videos that are protected by DRM (digital rights management) or geo-restrictions.
              • -
              • It may not support all formats and qualities of YouTube videos.
              • -
              -

              Method 3: Using 4K Video Downloader on a computer

              -

              4K Video Downloader is a paid software that can download videos from YouTube and other websites. It is available for Windows, Mac, and Linux operating systems. Here are the steps to use 4K Video Downloader to download bitcoin tutorial videos from YouTube:

              -
                -
1. Download and install 4K Video Downloader from its official website. You can use the free trial version or buy the full version for more features.
              2. -
              3. Launch the program and click the "Paste Link" button on the top left corner.
              4. -
              5. Paste the URL of the YouTube video that you want to download and click "Download".
              6. -
              7. Select your desired format, quality, subtitles, and location for the downloaded video. You can also choose to download playlists, channels, or 3D/360° videos.
              8. -
              9. Click the "Download" button on the bottom right corner and wait for the process to complete.
              10. -
              -

              Pros:

              -
                -
              • It is easy and user-friendly to use for downloading YouTube videos.
              • -
              • It supports multiple formats and qualities, including 4K and 8K resolutions.
              • -
              • It can download subtitles, playlists, channels, or 3D/360° videos, which can enhance your learning experience.
              • -
              -

              Cons:

              -
                -
              • It is not free and open source, which means you have to pay for the full version and trust its code.
              • -
              • It may not work for some videos that are protected by DRM (digital rights management) or geo-restrictions.
              • -
              • It may not be updated frequently, which may affect its compatibility with YouTube's changes.
              • -
              -

              Method 4: Using TubeMate on an Android device

              -

              TubeMate is a free app that can download videos from YouTube and other websites to your Android device. It is not available on the Google Play Store, so you have to download it from its official website or other sources. Here are the steps to use TubeMate to download bitcoin tutorial videos from YouTube:

              -
                -
              1. Download and install TubeMate from its official website or other sources. Make sure you enable the installation of apps from unknown sources on your device settings.
              2. -
              3. Launch the app and search for the YouTube video that you want to download. You can also paste the URL of the video into the app.
              4. -
              5. Select the video and tap the red download icon on the bottom right corner.
              6. -
              7. Select your desired format and quality from the list. You can also choose to download audio only or video only.
              8. -
              9. Tap the green download icon on the bottom right corner and choose a location to save the downloaded video.
              10. -
              11. Wait for the download to finish and enjoy your video.
              12. -
              -

              Pros:

              -
                -
              • It is free and easy to use for downloading YouTube videos to your Android device.
              • -
              • It supports multiple formats and qualities, including HD resolutions.
              • -
              • It can download audio only or video only, which can save bandwidth and storage space.
              • -
              -

              Cons:

              -
                -
              • It is not available on the Google Play Store, which means you have to download it from other sources that may not be safe or reliable.
              • -
              • It may not work for some videos that are protected by DRM (digital rights management) or geo-restrictions.
              • -
              • It may not support all formats and qualities of YouTube videos.
              • -
              -

              Method 5: Using Documents by Readdle on an iOS device

              -

              Documents by Readdle is a free app that can manage files, documents, and media on your iOS device. It can also be used to download videos from YouTube and other websites to your iOS device. It is available on the App Store. Here are the steps to use Documents by Readdle to download bitcoin tutorial videos from YouTube:

              -
                -
1. Download and install Documents by Readdle from the App Store.
              2. -
              3. Launch the app and tap the compass icon on the bottom right corner to open the built-in browser.
              4. -
              5. Type "savefrom.net" in the address bar and go to the website.
              6. -
              7. Paste the URL of the YouTube video that you want to download into the box and tap "Download".
              8. -
              9. Select your desired format and quality from the list. You can also choose to download audio only or video only.
              10. -
              11. Tap "Download" again and choose a name and location for the downloaded video.
              12. -
              13. Wait for the download to finish and enjoy your video. You can find it in the Downloads folder of the app.
              14. -
              -

              Pros:

              -
                -
              • It is free and easy to use for downloading YouTube videos to your iOS device.
              • -
              • It supports multiple formats and qualities, including HD resolutions.
              • -
              • It can download audio only or video only, which can save bandwidth and storage space.
              • -
              -

              Cons:

              -
                -
              • It requires an additional website (savefrom.net) to download YouTube videos, which may not be safe or reliable.
              • -
              • It may not work for some videos that are protected by DRM (digital rights management) or geo-restrictions.
              • -
              • It may not support all formats and qualities of YouTube videos.
              • -
              -

              Conclusion

              -

              In this article, we have shown you five methods to download bitcoin tutorial videos from YouTube. Each method has its own pros and cons, so you have to choose the one that suits your needs and preferences best. Here are some tips for finding the best bitcoin tutorial videos on YouTube to enhance your learning experience:

              -
                -
              • Use the filters and sorting options on YouTube to find the most relevant, recent, and popular videos on your topic.
              • -
              • Check the ratings, comments, and views of the videos to gauge their quality and credibility.
              • -
              • Look for videos from reputable sources, such as official channels, experts, or educators.
              • -
              • Watch videos with subtitles or captions, if available, to improve your comprehension and retention.
              • -
              • Take notes, pause, rewind, or repeat the videos as needed to understand the concepts and details.
              • -
              -

              We hope this article has helped you learn how to download bitcoin tutorial videos from YouTube. If you have any questions or feedback, please let us know in the comments below. Happy learning!

              -

              FAQs

              -

              Q: What is the best format and quality to download YouTube videos?

              -

              A: The best format and quality to download YouTube videos depends on your device, storage space, and viewing preferences. Generally, MP4 is the most compatible and widely supported format for video files. As for quality, higher resolutions (such as 1080p or 4K) offer better clarity and detail, but they also take up more space and bandwidth. Lower resolutions (such as 480p or 720p) are more suitable for smaller screens or slower connections, but they may look blurry or pixelated. You can also choose to download audio only or video only, depending on your needs.

              -

              Q: How can I download YouTube videos without any software or service?

              -

              A: There are some tricks that you can use to download YouTube videos without any software or service. One of them is to add "ss" before the "youtube.com" part of the URL of the video. For example, if the URL of the video is https://www.youtube.com/watch?v=abcde12345, you can change it to https://www.ssyoutube.com/watch?v=abcde12345. This will take you to a website where you can choose your format and quality and download the video. However, this method may not work for all videos or in all regions.
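If you want to apply this trick programmatically, the whole change is a one-line string replacement. A minimal sketch (the example URL is the same hypothetical one used above):

```python
# Inserts "ss" directly before "youtube.com", as described in the answer above.
# Whether the resulting page can actually serve the video depends on the video and region.
def add_ss(url: str) -> str:
    return url.replace("youtube.com", "ssyoutube.com", 1)

print(add_ss("https://www.youtube.com/watch?v=abcde12345"))
# -> https://www.ssyoutube.com/watch?v=abcde12345
```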

              -

              Q: How can I download YouTube videos with subtitles or captions?

              -

A: Some of the methods that we have shown in this article can download YouTube videos with subtitles or captions, if they are available. For example, Open Video Downloader and 4K Video Downloader have options to download subtitles along with the video. You can also use a separate tool or service to download subtitles or captions from YouTube videos. For example, you can use DownSub.com, which allows you to enter the URL of the video and download the subtitles or captions in various languages and formats.

              -

              Q: How can I convert YouTube videos to other formats?

              -

A: Some of the methods that we have shown in this article can convert YouTube videos to other formats during the download process. For example, Open Video Downloader and 4K Video Downloader have options to choose your desired format for the downloaded video. You can also use a separate tool or service to convert YouTube videos to other formats after downloading them. For example, you can use Online-Convert.com, which allows you to upload your video file and choose your target format and settings.

              -

              Q: How can I edit YouTube videos after downloading them?

              -

A: You can use any video editing software or app that supports your downloaded video format to edit YouTube videos after downloading them. For example, you can use Windows Movie Maker, iMovie, Adobe Premiere Pro, or Final Cut Pro to edit your video files on your computer. You can also use online video editors, such as WeVideo, Clipchamp, or Kapwing, to edit your video files in your browser.

              197e85843d
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download Kung Fu Panda 3 (English) Hd Movies.md b/spaces/tioseFevbu/cartoon-converter/scripts/Download Kung Fu Panda 3 (English) Hd Movies.md deleted file mode 100644 index 0e766833c052b5fe1a7448635dbe4090464d8e04..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download Kung Fu Panda 3 (English) Hd Movies.md +++ /dev/null @@ -1,48 +0,0 @@ - -

              How to Download Kung Fu Panda 3 (English) HD Movies

              -

              Kung Fu Panda 3 is a 2016 animated comedy film that follows the adventures of Po, a lovable panda who must train a village of clumsy pandas to fight against a supernatural villain. The film features the voices of Jack Black, Bryan Cranston, Dustin Hoffman, Angelina Jolie, J.K. Simmons and more. It is the third installment in the Kung Fu Panda franchise and received positive reviews from critics and audiences alike.

              -

              download Kung Fu Panda 3 (English) hd movies


              DOWNLOAD 🗸 https://urlcod.com/2uHyi6



              -

              If you want to watch Kung Fu Panda 3 in high definition, you have several options to download it legally and safely. Here are some of them:

              -
                -
              • Netflix: You can stream Kung Fu Panda 3 on Netflix with a subscription. Netflix also allows you to download movies and shows for offline viewing on compatible devices. To download Kung Fu Panda 3 on Netflix, follow these steps[^1^]: -
                  -
                1. Open the Netflix app on your device and sign in with your account.
                2. -
                3. Search for Kung Fu Panda 3 and tap on it.
                4. -
                5. Tap on the Download icon next to the Play button.
                6. -
                7. Wait for the download to finish and enjoy watching it offline.
                8. -
                -
              • -
              • Movies Anywhere: You can purchase Kung Fu Panda 3 on Movies Anywhere and watch it on any device that supports the service. Movies Anywhere also lets you download movies for offline viewing. To download Kung Fu Panda 3 on Movies Anywhere, follow these steps[^3^]: -
                  -
                1. Go to the Movies Anywhere website or app and sign in with your account.
                2. -
                3. Search for Kung Fu Panda 3 and click on it.
                4. -
                5. Click on the Buy button and choose your preferred retailer.
                6. -
                7. Complete the purchase and go to your library.
                8. -
                9. Click on the Download icon next to Kung Fu Panda 3 and wait for the download to finish.
                10. -
                -
              • -
              • IMDb: You can rent or buy Kung Fu Panda 3 on IMDb and watch it on any device that supports IMDb TV. IMDb TV also allows you to download movies for offline viewing. To download Kung Fu Panda 3 on IMDb, follow these steps[^2^]: -
                  -
                1. Go to the IMDb website or app and sign in with your account.
                2. -
                3. Search for Kung Fu Panda 3 and click on it.
                4. -
                5. Click on the Watch options button and choose Rent or Buy.
                6. -
                7. Select your preferred option and complete the transaction.
                8. -
                9. Go to your library and click on the Download icon next to Kung Fu Panda 3.
                10. -
                11. Wait for the download to finish and enjoy watching it offline.
                12. -
                -
              • -
              -

              These are some of the ways you can download Kung Fu Panda 3 (English) HD movies legally and safely. Remember to always respect the copyrights of the creators and avoid downloading pirated or illegal copies of movies. Happy watching!

              - -

              If you have already downloaded Kung Fu Panda 3 and want to learn more about the film, here are some interesting facts and trivia that you might enjoy:

              -

              -
                -
              • Kung Fu Panda 3 is the first film in the franchise to be co-produced by DreamWorks Animation and Oriental DreamWorks, a Chinese-American film production company. The film features several elements of Chinese culture, such as the panda village, the jade zombies, and the spirit realm.
              • -
              • The film was originally scheduled to be released in December 2015, but was pushed back to January 2016 to avoid competition with Star Wars: The Force Awakens. The film was released in China on January 23, 2016, two days before its US release, and became the highest-grossing animated film of all time in China.
              • -
              • The film introduces Po's biological father, Li Shan, voiced by Bryan Cranston. Li Shan was originally going to appear in Kung Fu Panda 2, but was cut from the final version. His design was also changed from a brown panda to a black-and-white panda to match Po's appearance.
              • -
              • The film also introduces a new villain, Kai, voiced by J.K. Simmons. Kai is a yak who was once Oogway's brother-in-arms, but turned evil after stealing the chi of other kung fu masters. Kai's design was inspired by ancient Chinese sculptures and armor.
              • -
              • The film features a new character, Mei Mei, voiced by Kate Hudson. Mei Mei is a ribbon-dancing panda who has a crush on Po. She was originally going to be voiced by Rebel Wilson, but Wilson had to drop out due to scheduling conflicts.
              • -
              -

              Kung Fu Panda 3 is a fun and entertaining film that celebrates family, friendship, and kung fu. If you haven't watched it yet, you can download it from one of the options mentioned above and enjoy it with your loved ones.

              e93f5a0c3f
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Full __HOT__ Steinberg WaveLab 6.1.1.353 (Cracked By TEAM AiR) [RH].md b/spaces/tioseFevbu/cartoon-converter/scripts/Full __HOT__ Steinberg WaveLab 6.1.1.353 (Cracked By TEAM AiR) [RH].md deleted file mode 100644 index c277d7f25f8f323c61070c7cff7a5f451a1b65db..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Full __HOT__ Steinberg WaveLab 6.1.1.353 (Cracked By TEAM AiR) [RH].md +++ /dev/null @@ -1,182 +0,0 @@ - - - -

              What Is Steinberg WaveLab 6.1.1.353?

              - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

              b2dd77e56b
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Game Server Whmcs Template Nulled Php.md b/spaces/tioseFevbu/cartoon-converter/scripts/Game Server Whmcs Template Nulled Php.md deleted file mode 100644 index f52f89acc6a8b8210250ee90ea478ea169b4f405..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Game Server Whmcs Template Nulled Php.md +++ /dev/null @@ -1,123 +0,0 @@ -
              -

              Game Server WHMCS Template Nulled PHP: What You Need to Know

              -

              If you are running a game server hosting business, you know how important it is to have a reliable and efficient web hosting management and billing software. You also know how important it is to have a professional and attractive website that can attract and retain your customers. That's why you may be interested in using a game server WHMCS template nulled php for your website. But what exactly is a game server WHMCS template nulled php, and what are the benefits and risks of using one? In this article, we will answer these questions and help you choose the best game server WHMCS template nulled php for your business.

              -

              game server whmcs template nulled php


              Downloadhttps://urlcod.com/2uHxfQ



              -

              What is WHMCS and why do you need it for your game server hosting business?

              -

              WHMCS is a web hosting management and billing software that helps you automate and streamline your web hosting operations. It allows you to manage your customers, invoices, products, services, domains, tickets, reports, and more from one central dashboard. It also integrates with popular payment gateways, domain registrars, control panels, and other third-party services.

              -

              WHMCS is a web hosting management and billing software

              -

              WHMCS stands for Web Host Manager Complete Solution. It is a software that helps you manage all aspects of your web hosting business from one place. You can use it to create and sell web hosting plans, domain names, SSL certificates, email accounts, VPNs, cloud services, and more. You can also use it to automate tasks such as provisioning, invoicing, suspending, terminating, upgrading, downgrading, renewing, and canceling services. You can also use it to track your income, expenses, taxes, commissions, refunds, credits, and discounts.

              -

              WHMCS helps you automate and streamline your game server hosting operations

              -

If you are offering game server hosting services to your customers, you need software that can handle the specific needs and challenges of game server hosting: software that integrates with popular game server control panels such as TCAdmin, GameCP, Pterodactyl, and Multicraft, supports game servers hosted with providers such as OVH, Hetzner, AWS, Google Cloud, and Azure, and gives your customers a seamless and secure experience from ordering to playing. That's why you need WHMCS for your game server hosting business.

              -

              -

              WHMCS integrates with popular game server control panels and providers

              -

One of the main advantages of WHMCS is that it integrates with many game server control panels and providers. This means that you can use WHMCS to automatically provision, manage, monitor, and bill your game servers across different platforms and locations. You can also use WHMCS to offer your customers a user-friendly interface to access and control their game servers from your website. For example, you can use WHMCS to do the following (a rough API call sketch follows the list):

              -
                -
              • Create and sell game server plans with different features, resources, and prices
              • -
              • Automatically assign game servers to customers after payment confirmation
              • -
              • Allow customers to choose their preferred game server provider and location
              • -
              • Allow customers to install, update, restart, stop, backup, restore, and delete their game servers
              • -
              • Allow customers to configure their game server settings, mods, plugins, maps, and more
              • -
              • Allow customers to view their game server status, statistics, logs, and console
              • -
              • Allow customers to manage their sub-users, FTP accounts, firewall rules, and SSH keys
              • -
              • Automatically suspend or terminate game servers for non-payment or abuse
              • -
              • Automatically generate invoices and send reminders for game server renewals
              • -
              • Automatically apply discounts or credits for game server referrals or loyalty
              • -
              -
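Many of these tasks can also be driven from scripts, because WHMCS exposes its admin functions through an HTTP API. The snippet below is only an illustrative sketch: the domain, API identifier, and secret are placeholders, it assumes API credentials have already been created in the WHMCS admin area, and it uses Python's requests library rather than WHMCS's own PHP helpers. The endpoint path, parameter names, and the GetClients action follow WHMCS's documented external API, but check the version you run before relying on them.

```python
# Rough sketch of calling the WHMCS external API from a script.
# All credentials and the domain below are placeholders.
import requests

WHMCS_API_URL = "https://billing.example.com/includes/api.php"  # hypothetical install

payload = {
    "identifier": "API_IDENTIFIER",  # API credential created in the WHMCS admin area
    "secret": "API_SECRET",
    "action": "GetClients",          # example action; others cover orders, invoices, etc.
    "responsetype": "json",
}

response = requests.post(WHMCS_API_URL, data=payload, timeout=30)
response.raise_for_status()
print(response.json())
```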

              What is a game server WHMCS template and why do you need one?

              -

              A game server WHMCS template is a pre-designed website theme for your WHMCS installation. It is a collection of files that define the layout, style, colors, fonts, images, icons, menus, buttons, forms, and other elements of your website. A game server WHMCS template helps you create a professional and attractive online presence for your game server hosting business. It also helps you showcase your game server features, plans, prices, and reviews.

              -

              A game server WHMCS template is a pre-designed website theme for your WHMCS installation

              -

              A game server WHMCS template is not a standalone website. It is a theme that works with your existing WHMCS installation. You can install a game server WHMCS template on your WHMCS directory and activate it from your WHMCS admin panel. You can also customize the template according to your preferences and needs. You can change the logo, favicon, banner, slider, footer, social media links, and other elements of the template. You can also add or remove pages, sections, widgets, and features from the template. You can also use the template's built-in options panel to adjust the colors, fonts, backgrounds, animations, and other settings of the template.

              -

              A game server WHMCS template helps you create a professional and attractive online presence for your game server hosting business

              -

              A game server WHMCS template is not just a cosmetic enhancement for your website. It is also a strategic tool that can help you boost your online reputation and credibility. A game server WHMCS template can help you create a website that reflects your brand identity and values. It can also help you create a website that appeals to your target audience and market niche. A game server WHMCS template can help you create a website that showcases your expertise and experience in game server hosting. It can also help you create a website that demonstrates your commitment to quality and customer satisfaction.

              -

              A game server WHMCS template can showcase your game server features, plans, prices, and reviews

              -

              A game server WHMCS template can also help you showcase your game server products and services to your potential and existing customers. A game server WHMCS template can help you display your game server features, plans, prices, and reviews in a clear and attractive way. For example, you can use a game server WHMCS template to:

              -
                -
              • Display your supported games, genres, platforms, and regions
              • -
              • Display your game server specifications, resources, and performance
              • -
              • Display your game server pricing, billing cycles, discounts, and coupons
              • -
              • Display your game server testimonials, ratings, feedback, and awards
              • -
              • Display your game server comparison, ranking, and recommendation tools
              • -
              -

              What are the benefits of using a game server WHMCS template nulled php?

              -

              A game server WHMCS template nulled php is a free or low-cost alternative to a premium or licensed template. It is a template that has been modified or cracked to remove the license verification or activation code. It is a template that can be downloaded from various websites or forums that offer nulled scripts and themes. A game server WHMCS template nulled php can offer you some benefits such as saving you time and money on web design and development, and offering you more customization and flexibility options.

              -

              A game server WHMCS template nulled php is a free or low-cost alternative to a premium or licensed template

              -

              One of the main benefits of using a game server WHMCS template nulled php is that it can save you money on web design and development. A premium or licensed game server WHMCS template can cost anywhere from $20 to $200 or more depending on the quality, features, and support of the template. A nulled game server WHMCS template can be downloaded for free or for a nominal fee from various sources online. This can be appealing if you are on a tight budget or if you want to try out different templates before buying one.

              -

              A game server WHMCS template nulled php can save you time and money on web design and development

              -

              Another benefit of using a game server WHMCS template nulled php is that it can save you time on web design and development. A premium or licensed game server WHMCS template may require you to purchase a license for each domain or website that you want to use it on. This can be inconvenient if you have multiple websites or domains for your game server hosting business. A nulled game server WHMCS template can be used on unlimited domains or websites without any restrictions. This can be convenient if you want to launch or expand your game server hosting business quickly and easily.

              -

              A game server WHMCS template nulled php can offer you more customization and flexibility options

              -

A third benefit of using a game server WHMCS template nulled php is that it can offer you more customization and flexibility options. A premium or licensed game server WHMCS template may have limited options for customization and modification. It may also have encrypted or obfuscated code that prevents you from changing or adding anything to the template. A nulled game server WHMCS template can have more options for customization and modification. It can also have unencrypted or readable code that allows you to change or add anything to the template. This can be useful if you want to personalize or optimize your website according to your preferences and needs.

              -

              What are the risks of using a game server WHMCS template nulled php?

              -

              While a game server WHMCS template nulled php can offer you some benefits, it can also pose some risks for your website and business. A game server WHMCS template nulled php may contain malware, viruses, or backdoors that can compromise your website security and performance. It may also violate the intellectual property rights of the original template developer or provider. It may also not be compatible with the latest version of WHMCS or other plugins and modules. These risks can result in legal, financial, or reputational damages for your business.

              -

              A game server WHMCS template nulled php may contain malware, viruses, or backdoors that can compromise your website security and performance

              -

              One of the main risks of using a game server WHMCS template nulled php is that it may contain malware, viruses, or backdoors that can compromise your website security and performance. Malware is any software that is designed to harm or disrupt your computer system or network. Viruses are a type of malware that can infect and spread to other files or programs on your computer system or network. Backdoors are a type of malware that can allow unauthorized access to your computer system or network. These malicious codes can be hidden in the game server WHMCS template nulled php files or embedded in the links or images of the template. They can be activated when you install, activate, or use the template on your website. They can cause various problems such as:

              -
                -
              • Stealing your personal or business information such as passwords, credit card numbers, bank accounts, customer data, etc.
              • -
              • Redirecting your website traffic to malicious or spam websites
              • -
              • Injecting unwanted ads, pop-ups, banners, or links on your website
              • -
              • Slowing down your website loading speed or crashing your website
              • -
              • Corrupting or deleting your website files or database
              • -
              • Spreading malware, viruses, or backdoors to other websites or devices connected to your network
              • -
              -

              A game server WHMCS template nulled php may violate the intellectual property rights of the original template developer or provider

              -

              Another risk of using a game server WHMCS template nulled php is that it may violate the intellectual property rights of the original template developer or provider. Intellectual property rights are the legal rights that protect the creations of the mind such as inventions, designs, works of art, logos, names, etc. They include patents, trademarks, copyrights, and trade secrets. They give the owner of the intellectual property the exclusive right to use, reproduce, distribute, modify, or sell their creation. They also give the owner of the intellectual property the right to take legal action against anyone who infringes their rights.

              -

              A game server WHMCS template nulled php may violate the intellectual property rights of the original template developer or provider by using their creation without their permission or paying them any royalties or fees. This can be considered as piracy, theft, or fraud. This can expose you to legal consequences such as:

              -
                -
              • Lawsuits or claims for damages or injunctions from the original template developer or provider
              • -
              • Fines or penalties from the authorities or courts for violating intellectual property laws
              • -
              • Loss of reputation or credibility as a game server hosting business
              • -
              • Loss of customers or revenue due to negative reviews or feedback
              • -
              -

              A game server WHMCS template nulled php may not be compatible with the latest version of WHMCS or other plugins and modules

              -

              A third risk of using a game server WHMCS template nulled php is that it may not be compatible with the latest version of WHMCS or other plugins and modules that you use on your website. WHMCS is constantly updated and improved to fix bugs, add features, enhance security, and comply with industry standards and regulations. Plugins and modules are also updated and improved to work with the latest version of WHMCS and other software. A game server WHMCS template nulled php may not be updated or improved to match the changes and requirements of WHMCS and other plugins and modules. This can cause various problems such as:

              • Broken or missing features or functions on your website
              • Conflicts or errors with other plugins or modules on your website
              • Vulnerabilities or loopholes that can be exploited by hackers or attackers
              • Incompatibility or non-compliance with industry standards and regulations
              • Loss of support or assistance from the original template developer or provider

              How to choose the best game server WHMCS template nulled php for your business?


              If you decide to use a game server WHMCS template nulled php for your website, you need to be careful and cautious in choosing the best one for your business. You need to check the source and reputation of the game server WHMCS template nulled php provider or developer, scan the game server WHMCS template nulled php files for any malicious code or hidden links, and test the game server WHMCS template nulled php on a local or staging environment before installing it on your live website.


              Check the source and reputation of the game server WHMCS template nulled php provider or developer


              The first step in choosing the best game server WHMCS template nulled php for your business is to check the source and reputation of the game server WHMCS template nulled php provider or developer. You need to make sure that you are downloading the game server WHMCS template nulled php from a reliable and trustworthy website or forum that offers nulled scripts and themes. You need to avoid websites or forums that are known for distributing malware, viruses, or backdoors in their nulled scripts and themes. You need to also avoid websites or forums that are known for violating intellectual property rights in their nulled scripts and themes.


              You can check the source and reputation of the game server WHMCS template nulled php provider or developer by doing some research online. Look for positive indicators of credibility, quality, and trustworthiness, such as reviews, ratings, testimonials, certifications, memberships, partnerships, or awards. Look equally for negative indicators of unreliability or illegality, such as complaints, warnings, lawsuits, fines, sanctions, bans, or suspensions. You can also look for recommendations, guides, or tutorials from other game server hosting businesses or experts who have used or are familiar with the provider or developer.


              Scan the game server WHMCS template nulled php files for any malicious code or hidden links


              The second step in choosing the best game server WHMCS template nulled php for your business is to scan the game server WHMCS template nulled php files for any malicious code or hidden links. You need to make sure that the game server WHMCS template nulled php files are clean and safe to use on your website. You need to avoid game server WHMCS template nulled php files that contain malware, viruses, or backdoors that can compromise your website security and performance. You need to also avoid game server WHMCS template nulled php files that contain hidden links or redirects that can harm your website ranking and reputation.


              You can scan the game server WHMCS template nulled php files for any malicious code or hidden links by using some tools and techniques online. You can use antivirus or anti-malware software to scan the game server WHMCS template nulled php files for any malware, viruses, or backdoors. You can also use online scanners such as VirusTotal, Sucuri SiteCheck, Quttera Web Malware Scanner, or ScanURL to scan the game server WHMCS template nulled php files for any malware, viruses, or backdoors. You can also use code editors such as Notepad++, Sublime Text, Atom, Visual Studio Code, or Brackets to open and inspect the game server WHMCS template nulled php files for any hidden links or redirects. You can also use online tools such as Link Checker, Broken Link Checker, Dr. Link Check, or W3C Link Checker to check the game server WHMCS template nulled php files for any broken or malicious links.
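              As a rough illustration, the sketch below shows how such a scan could be automated with a short Python script. The file extensions, the ./nulled-template directory, and the regular expressions are illustrative assumptions only, not a complete malware signature set; a clean report from a script like this does not prove that a template is safe.

```python
import re
from pathlib import Path

# Patterns commonly associated with obfuscated or injected PHP code.
# These are illustrative examples, not an exhaustive signature list.
SUSPICIOUS_PATTERNS = {
    "obfuscated execution": re.compile(r"eval\s*\(\s*(base64_decode|gzinflate|str_rot13)", re.I),
    "dynamic code execution": re.compile(r"\b(create_function|preg_replace\s*\(\s*['\"].*/e)", re.I),
    "shell command execution": re.compile(r"\b(shell_exec|system|passthru|exec)\s*\(", re.I),
    "hidden external link": re.compile(r"<a[^>]+style\s*=\s*['\"][^'\"]*display\s*:\s*none", re.I),
}

def scan_template(template_dir: str) -> list:
    """Return (file, pattern name, line number) for every suspicious match."""
    findings = []
    for path in Path(template_dir).rglob("*"):
        if not path.is_file() or path.suffix.lower() not in {".php", ".tpl", ".js", ".html"}:
            continue
        text = path.read_text(errors="ignore")
        for lineno, line in enumerate(text.splitlines(), start=1):
            for name, pattern in SUSPICIOUS_PATTERNS.items():
                if pattern.search(line):
                    findings.append((str(path), name, lineno))
    return findings

if __name__ == "__main__":
    # Hypothetical directory where the downloaded template was unpacked.
    for file, name, lineno in scan_template("./nulled-template"):
        print(f"{file}:{lineno}: possible {name}")
```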


              Test the game server WHMCS template nulled php on a local or staging environment before installing it on your live website


              The third step in choosing the best game server WHMCS template nulled php for your business is to test the game server WHMCS template nulled php on a local or staging environment before installing it on your live website. You need to make sure that the game server WHMCS template nulled php works properly and smoothly on your website. You need to avoid a game server WHMCS template nulled php that causes errors or conflicts with your website functionality or appearance. You also need to avoid one that is incompatible or non-compliant with your website standards or regulations.


              You can test the game server WHMCS template nulled php on a local or staging environment by using some tools and techniques online. You can use local servers such as XAMPP, WAMP, MAMP, or LAMP to create a local server on your computer and install WHMCS and the game server WHMCS template nulled php on it. You can also use staging servers such as WP Staging, WP Stagecoach, or BlogVault to create a staging site on your web server and install WHMCS and the game server WHMCS template nulled php on it. You can then test the game server WHMCS template nulled php on your local or staging site and check for any issues or problems. You can also use tools such as BrowserStack, LambdaTest, or CrossBrowserTesting to test the game server WHMCS template nulled php on different browsers and devices and check for any compatibility or responsiveness issues.
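              As a minimal example, the following Python sketch runs a smoke test against a local or staging WHMCS installation. The base URL, the page list, and the error markers are assumptions that you would adapt to your own setup; it only checks that the pages load and do not leak obvious PHP or template errors.

```python
import requests

# Hypothetical staging URL and pages; adjust these to your own WHMCS installation.
STAGING_URL = "http://localhost/whmcs"
PAGES = ["/index.php", "/cart.php", "/clientarea.php", "/contact.php"]
ERROR_MARKERS = ["Fatal error", "Parse error", "Smarty error", "Stack trace"]

def smoke_test(base_url: str) -> bool:
    ok = True
    for page in PAGES:
        response = requests.get(base_url + page, timeout=10)
        if response.status_code != 200:
            print(f"{page}: unexpected HTTP status {response.status_code}")
            ok = False
        leaked = [marker for marker in ERROR_MARKERS if marker in response.text]
        if leaked:
            print(f"{page}: error markers found: {leaked}")
            ok = False
    return ok

if __name__ == "__main__":
    print("PASS" if smoke_test(STAGING_URL) else "FAIL")
```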


              Conclusion


              A game server WHMCS template nulled php can be a tempting option for your game server hosting business website. It can offer you some benefits such as saving you time and money on web design and development, and offering you more customization and flexibility options. However, it can also pose some risks for your website and business such as containing malware, viruses, or backdoors, violating intellectual property rights, and being incompatible or non-compliant with the latest version of WHMCS or other plugins and modules. Therefore, you need to be careful and cautious in choosing the best game server WHMCS template nulled php for your business. You need to check the source and reputation of the game server WHMCS template nulled php provider or developer, scan the game server WHMCS template nulled php files for any malicious code or hidden links, and test the game server WHMCS template nulled php on a local or staging environment before installing it on your live website.


              FAQs


              Here are some frequently asked questions about game server WHMCS template nulled php:


              What is the difference between a game server WHMCS template nulled php and a premium or licensed game server WHMCS template?


              A game server WHMCS template nulled php is a free or low-cost alternative to a premium or licensed game server WHMCS template. It is a game server WHMCS template that has been modified or cracked to remove the license verification or activation code. A premium or licensed game server WHMCS template is a game server WHMCS template that has been developed or provided by a reputable and trustworthy company or individual. It is a game server WHMCS template that requires you to purchase a license or pay a fee to use it on your website.


              Where can I find a game server WHMCS template nulled php?


              You can find a game server WHMCS template nulled php from various websites or forums that offer nulled scripts and themes. However, you need to be careful and cautious in choosing the source and reputation of the game server WHMCS template nulled php provider or developer. You need to avoid websites or forums that are known for distributing malware, viruses, or backdoors in their nulled scripts and themes. You need to also avoid websites or forums that are known for violating intellectual property rights in their nulled scripts and themes.


              How can I install a game server WHMCS template nulled php on my website?


              You can install a game server WHMCS template nulled php on your website by following these steps:

              1. Download the game server WHMCS template nulled php files from a reliable and trustworthy source
              2. Scan the game server WHMCS template nulled php files for any malicious code or hidden links
              3. Test the game server WHMCS template nulled php on a local or staging environment
              4. Upload the game server WHMCS template nulled php files to your WHMCS directory on your web server
              5. Log in to your WHMCS admin panel and go to Setup > General Settings > General > Template
              6. Select the game server WHMCS template nulled php from the dropdown menu and click Save Changes
              7. Refresh your website and enjoy your new game server WHMCS template nulled php

              How can I customize a game server WHMCS template nulled php?


              You can customize a game server WHMCS template nulled php by using some tools and techniques online. You can use code editors such as Notepad++, Sublime Text, Atom, Visual Studio Code, or Brackets to open and edit the game server WHMCS template nulled php files for any changes or additions that you want to make. You can also use the game server WHMCS template nulled php's built-in options panel to adjust the colors, fonts, backgrounds, animations, and other settings of the template. You can also use plugins or modules such as WHMCS Customizer, WHMCS Designer, WHMCS Styler, or WHMCS Theme Changer to customize the game server WHMCS template nulled php without touching the code.


              How can I update a game server WHMCS template nulled php?


              You can update a game server WHMCS template nulled php by following these steps:

              1. Check the source and reputation of the game server WHMCS template nulled php provider or developer for any updates or new versions of the template
              2. Download the updated or new version of the game server WHMCS template nulled php files from a reliable and trustworthy source
              3. Scan the updated or new version of the game server WHMCS template nulled php files for any malicious code or hidden links
              4. Test the updated or new version of the game server WHMCS template nulled php on a local or staging environment
              5. Backup your existing game server WHMCS template nulled php files and database on your web server (a minimal backup sketch is shown after this list)
              6. Replace your existing game server WHMCS template nulled php files with the updated or new version of the game server WHMCS template nulled php files on your web server
              7. Refresh your website and check for any issues or problems with the updated or new version of the game server WHMCS template nulled php
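              For the backup step above, the sketch below shows one possible way to archive the template files and dump the WHMCS database with Python. The paths, database name, and user are hypothetical placeholders; adapt them to your own server before running anything.

```python
import subprocess
import tarfile
from datetime import datetime
from pathlib import Path

# Hypothetical locations and credentials; replace these with your own values.
TEMPLATE_DIR = Path("/var/www/whmcs/templates/gameserver-template")
BACKUP_DIR = Path("/var/backups/whmcs")
DB_NAME = "whmcs"
DB_USER = "whmcs_user"

def backup_template_and_db() -> None:
    BACKUP_DIR.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")

    # 1. Archive the current template files.
    archive_path = BACKUP_DIR / f"template-{stamp}.tar.gz"
    with tarfile.open(archive_path, "w:gz") as archive:
        archive.add(TEMPLATE_DIR, arcname=TEMPLATE_DIR.name)

    # 2. Dump the WHMCS database (mysqldump prompts for the password via -p).
    dump_path = BACKUP_DIR / f"db-{stamp}.sql"
    with open(dump_path, "w") as dump_file:
        subprocess.run(
            ["mysqldump", "-u", DB_USER, "-p", DB_NAME],
            stdout=dump_file,
            check=True,
        )
    print(f"Backup written to {archive_path} and {dump_path}")

if __name__ == "__main__":
    backup_template_and_db()
```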

              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/MATLAB For Control Engineers Pdf.md b/spaces/tioseFevbu/cartoon-converter/scripts/MATLAB For Control Engineers Pdf.md deleted file mode 100644 index eca7ef6bb1d66f0211bf54d56b80a2425253782c..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/MATLAB For Control Engineers Pdf.md +++ /dev/null @@ -1,39 +0,0 @@ - -

              How to Learn MATLAB for Control Engineering


              MATLAB is a powerful software tool for numerical computation, data analysis, visualization, and programming. It is widely used by engineers, scientists, and students in various fields, especially in control engineering. Control engineering is the discipline that deals with the design, analysis, and implementation of systems that regulate the behavior of other systems. Control engineers use MATLAB to model, simulate, test, and optimize control systems using various techniques and tools.


              One of the advantages of MATLAB is that it has a rich collection of built-in functions and toolboxes that can help control engineers with their tasks. For example, the Control System Toolbox provides functions and apps for analyzing and designing feedback systems. The Simulink platform enables graphical modeling and simulation of dynamic systems. The Fuzzy Logic Toolbox allows creating and manipulating fuzzy inference systems for handling uncertainty and imprecision. The System Identification Toolbox helps with estimating dynamic models from measured data.


              MATLAB for Control Engineers pdf

              Download: https://urlcod.com/2uHxEb




              If you want to learn MATLAB for control engineering, you can start by reading some books that cover the basics of MATLAB and control systems. For instance, you can check out MATLAB for Control Engineers by Katsuhiko Ogata[^1^], which introduces MATLAB and its applications to control systems analysis and design. Another book is Control Engineering with Matlab by Noorulden Basil[^2^], which explains how to use MATLAB for fuzzy logic, system identification, and Simulink modeling. A third book is Analysis and Design of Control Systems Using MATLAB by Rao V. Dukkipati[^3^], which covers various topics such as state-space methods, frequency-domain methods, robust control, optimal control, and nonlinear control.


              Besides reading books, you can also learn MATLAB for control engineering by taking online courses or watching video tutorials. For example, you can enroll in the Introduction to Control System Design - A First Look course on Coursera, which teaches how to use MATLAB and Simulink to design controllers for linear systems. You can also watch the MATLAB Tutorial for Beginners playlist on YouTube, which covers the basics of MATLAB programming and plotting.


              Learning MATLAB for control engineering can be fun and rewarding. It can help you solve complex problems, create innovative solutions, and enhance your skills and knowledge. MATLAB is a versatile tool that can support you throughout your control engineering projects.


              In this section, we will show you some examples of how to use MATLAB for control engineering. We will use the Control System Toolbox and Simulink to analyze and design a simple feedback system that controls the speed of a DC motor. We will also use the Fuzzy Logic Toolbox to create a fuzzy controller that can handle nonlinearities and uncertainties in the system.


              Example 1: Speed Control of a DC Motor


              A DC motor is a device that converts electrical energy into mechanical energy. The speed of a DC motor depends on the voltage applied to its terminals and the load torque. A feedback system can be used to regulate the speed of a DC motor by adjusting the voltage according to the difference between the desired speed and the actual speed.


              The following figure shows a block diagram of a feedback system for speed control of a DC motor. The system consists of four components: a DC motor, a tachometer, a controller, and a power amplifier. The DC motor has an armature resistance R, an armature inductance L, a back emf constant Ke, and a torque constant Kt. The tachometer measures the angular velocity of the motor shaft and produces a voltage proportional to it. The controller is a proportional-integral (PI) controller that computes the error between the reference speed and the measured speed and generates a control signal u. The power amplifier amplifies the control signal and applies it to the motor terminals.

              (Figure: block diagram of the speed-control system for a DC motor.)

              We can use MATLAB to model, simulate, and design this feedback system. First, we need to derive the transfer function of the DC motor. The transfer function is the ratio of the output (angular velocity) to the input (voltage) of a linear system. We can use Kirchhoff's voltage law for the armature circuit and Newton's second law for the rotor to obtain the following differential equations that relate the input voltage Va, the armature current ia, and the output angular velocity ω:

              L dia/dt + R ia = Va - Ke ω

              J dω/dt + b ω = Kt ia - TL

              where ia is the armature current, TL is the load torque, J is the moment of inertia of the rotor, and b is the viscous friction coefficient. Assuming that TL is constant, we can eliminate ia from these equations and obtain:

              (LJ) d²ω/dt² + (bL + RJ) dω/dt + (bR + KeKt) ω = Kt Va - R TL

              Taking the Laplace transform of both sides and rearranging (treating the constant load torque as a separate disturbance input), we get:

              ω(s)/Va(s) = Kt / [(LJ)s² + (bL + RJ)s + (bR + KeKt)]

              This is the transfer function of the DC motor. We can define it in MATLAB using the tf function:

```matlab
% Define parameters
R = 1;      % armature resistance (ohm)
L = 0.5;    % armature inductance (H)
Ke = 0.01;  % back emf constant (V.s/rad)
Kt = 0.01;  % torque constant (N.m/A)
J = 0.01;   % moment of inertia (kg.m^2)
b = 0.1;    % viscous friction coefficient (N.m.s/rad)
TL = 0.01;  % load torque (N.m)

% Define transfer function from armature voltage Va to angular velocity
num = Kt;                          % numerator
den = [L*J, b*L+R*J, b*R+Ke*Kt];   % denominator: (LJ)s^2 + (bL+RJ)s + (bR+KeKt)
motor_tf = tf(num, den);           % open-loop motor transfer function
step(motor_tf)                     % plot the open-loop step response
```

              \ No newline at end of file diff --git a/spaces/tom-beer/birds-israel/bird_app.py b/spaces/tom-beer/birds-israel/bird_app.py deleted file mode 100644 index d63ca507b674d6cfe7f09f4fb0a7d577337f40e0..0000000000000000000000000000000000000000 --- a/spaces/tom-beer/birds-israel/bird_app.py +++ /dev/null @@ -1,30 +0,0 @@ -import json -import numpy as np -import onnxruntime -from PIL import Image - - -class BirdApp: - def __init__(self): - self.onnx_session = onnxruntime.InferenceSession("model4app.onnx") - self.img_class_map = get_img_class_map() - - def predict(self, x): - input_tensor = transform_image(x) - onnx_inputs = {self.onnx_session.get_inputs()[0].name: input_tensor} - img_label = self.onnx_session.run(None, onnx_inputs)[0].argmax() - return {'class_id': int(img_label), 'class_name': self.img_class_map[str(img_label)]} - - -def transform_image(infile) -> np.array: - image = (Image - .open(infile) - .resize((224, 224)) - ) - return np.expand_dims(np.array(image, dtype=np.float32), 0).transpose([0, 3, 1, 2]) - - -def get_img_class_map(): - with open('index_to_name.json') as f: - img_class_map = json.load(f) - return img_class_map diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/README.md deleted file mode 100644 index 6a52eadb1908ce29ddb0d5dc58a2743b8322dd6c..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection - -## Introduction - - - -```latex -@inproceedings{ghiasi2019fpn, - title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, - author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={7036--7045}, - year={2019} -} -``` - -## Results and Models - -We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. - -| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -|:-----------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| R-50-FPN | 50e | 12.9 | 22.9 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco_20200529_095329.log.json) | -| R-50-NASFPN | 50e | 13.2 | 23.0 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco_20200528_230008.log.json) | - -**Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. 
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd300_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd300_coco.py deleted file mode 100644 index b5cc006477eacaa9ab40d463312dc2156a59d634..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd300_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../ssd/ssd300_coco.py' - -model = dict( - bbox_head=dict(type='PISASSDHead'), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) - -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py deleted file mode 100644 index e4215a6d2d0b90f8ccd9c1291f6ca222c0ff554f..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,136 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' -# model settings -model = dict( - type='SCNet', - roi_head=dict( - _delete_=True, - type='SCNetRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='SCNetMaskHead', - num_convs=12, - in_channels=256, - conv_out_channels=256, - num_classes=80, - conv_to_res=True, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - semantic_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[8]), - semantic_head=dict( - type='SCNetSemanticHead', - num_ins=5, - fusion_level=1, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - ignore_label=255, - loss_weight=0.2, - 
conv_to_res=True), - glbctx_head=dict( - type='GlobalContextHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_weight=3.0, - conv_to_res=True), - feat_relay_head=dict( - type='FeatureRelayHead', - in_channels=1024, - out_conv_channels=256, - roi_feat_size=7, - scale_factor=2))) - -# uncomment below code to enable test time augmentations -# img_norm_cfg = dict( -# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# test_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict( -# type='MultiScaleFlipAug', -# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), -# (1400, 2100)], -# flip=True, -# transforms=[ -# dict(type='Resize', keep_ratio=True), -# dict(type='RandomFlip', flip_ratio=0.5), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='Pad', size_divisor=32), -# dict(type='ImageToTensor', keys=['img']), -# dict(type='Collect', keys=['img']), -# ]) -# ] -# data = dict( -# val=dict(pipeline=test_pipeline), -# test=dict(pipeline=test_pipeline)) diff --git a/spaces/tomofi/NDLOCR/src/separate_pages_ssd/training/make_pkl_for_page.py b/spaces/tomofi/NDLOCR/src/separate_pages_ssd/training/make_pkl_for_page.py deleted file mode 100644 index 55ca435aa312544cf6d684c70efb1cb50cf086a5..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/separate_pages_ssd/training/make_pkl_for_page.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy as np -import os -import glob -import pandas as pd -import cv2 -from xml.etree import ElementTree -import random -import shutil -st=set() -random.seed(77) - -#各画像のレイアウトの情報(今回はのど元の矩形)を、画像の左上を(0,0),右下を(1,1)となるように表した座標をpklファイルに保存する。 -#pklファイルはディクショナリをダンプしたもので、キーがファイル名、バリューは矩形の座標(左上xyと右下xy)の配列とラベルの配列を持つ。 - -class CSV_preprocessor(object): - - def __init__(self): - self.num_classes = 1 - self.data = dict() - self._preprocess_CSV() - def _preprocess_CSV(self): - df=pd.read_table("image.tsv",names=('filename',"roll")) - for index, row in df.iterrows(): - filename=row["filename"] - xminp=(0.5 - row["roll"])-0.01#のど元の中心から左右に画像幅の1%ずつ広げた、短冊状の矩形を見つけたい領域とする。 - xmaxp = xminp + 0.02 - yminp=0 - ymaxp=1 - bounding_box = [xminp, yminp, xmaxp, ymaxp] - bounding_boxes_np = np.asarray([bounding_box]) - image_data = np.hstack((bounding_boxes_np, [[1]])) - self.data[filename] = image_data - -## example on how to use it -import pickle -data = CSV_preprocessor().data -pickle.dump(data,open('page_layout.pkl','wb')) -f = open('page_layout.pkl', 'rb') - - diff --git a/spaces/tovaru/vits-for-ba/data_utils.py b/spaces/tovaru/vits-for-ba/data_utils.py deleted file mode 100644 index 00d165f2fc2fd4338b88af178abd4f2c3241e98d..0000000000000000000000000000000000000000 --- a/spaces/tovaru/vits-for-ba/data_utils.py +++ /dev/null @@ -1,394 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import text_to_sequence, cleaned_text_to_sequence - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_and_text) - self._filter() - - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - audiopath, text = audiopath_and_text[0], audiopath_and_text[1] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - return (text, spec, wav) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - - text_padded = 
torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths - - -"""Multi speaker version""" -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - for audiopath, sid, text in self.audiopaths_sid_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_sid_text_new.append([audiopath, sid, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - sid = self.get_sid(sid) - return (text, spec, wav, sid) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = 
text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
- """ - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i+1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if len_bucket == 0: - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/triggah61/chingu-music/README.md b/spaces/triggah61/chingu-music/README.md deleted file mode 100644 index e516cbdf16f75dc036b677bdee61d62e8c5b39dd..0000000000000000000000000000000000000000 --- a/spaces/triggah61/chingu-music/README.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: "MusicGen" -python_version: "3.9" -tags: - - "music generation" - - "language models" - - "LLMs" -app_file: "app.py" -emoji: 🎵 -colorFrom: white -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -pinned: true -license: "cc-by-nc-4.0" ---- -# Audiocraft -![docs badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_docs/badge.svg) -![linter badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_linter/badge.svg) 
-![tests badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_tests/badge.svg) - -Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model. - -## MusicGen - -Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single stage auto-regressive -Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require a self-supervised semantic representation, and it generates -all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict -them in parallel, thus having only 50 auto-regressive steps per second of audio. -Check out our [sample page][musicgen_samples] or test the available demo! - - - Open In Colab - - - Open in HugginFace - -
              - -We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data. - -## Installation -Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following: - -```shell -# Best to make sure you have torch installed first, in particular before installing xformers. -# Don't run this if you already have PyTorch installed. -pip install 'torch>=2.0' -# Then proceed to one of the following -pip install -U audiocraft # stable release -pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge -pip install -e . # or if you cloned the repo locally -``` - -## Usage -We offer a number of way to interact with MusicGen: -1. A demo is also available on the [`facebook/MusicGen` HuggingFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support). -2. You can run the Gradio demo in Colab: [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing). -3. You can use the gradio demo locally by running `python app.py`. -4. You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally (if you have a GPU). -5. Finally, checkout [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab) which is regularly - updated with contributions from @camenduru and the community. - -## API - -We provide a simple API and 4 pre-trained models. The pre trained models are: -- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small) -- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium) -- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody) -- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large) - -We observe the best trade-off between quality and compute with the `medium` or `melody` model. -In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller -GPUs will be able to generate short sequences, or longer sequences with the `small` model. - -**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer version of `torchaudio`. -You can install it with: -``` -apt-get install ffmpeg -``` - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('melody') -model.set_generation_params(duration=8) # generate 8 seconds. -wav = model.generate_unconditional(4) # generates 4 unconditional audio samples -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav = model.generate(descriptions) # generates 3 samples. - -melody, sr = torchaudio.load('./assets/bach.mp3') -# generates using the melody from the given audio and the provided descriptions. -wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. 
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) -``` - - -## Model Card - -See [the model card page](./MODEL_CARD.md). - -## FAQ - -#### Will the training code be released? - -Yes. We will soon release the training code for MusicGen and EnCodec. - - -#### I need help on Windows - -@FurkanGozukara made a complete tutorial for [Audiocraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4) - -#### I need help for running the demo on Colab - -Check [@camenduru tutorial on Youtube](https://www.youtube.com/watch?v=EGfxuTy9Eeo). - - -## Citation -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License -* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE). -* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights). - -[arxiv]: https://arxiv.org/abs/2306.05284 -[musicgen_samples]: https://ai.honu.io/papers/musicgen/ -title: Audio -emoji: 🏆 -colorFrom: blue -colorTo: purple -title: Audio -emoji: 📈 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -title: Chingu Music -emoji: 🐨 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -title: Grokgpt -emoji: 📈 -colorFrom: yellow -colorTo: red -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/trttung1610/musicgen/audiocraft/modules/seanet.py b/spaces/trttung1610/musicgen/audiocraft/modules/seanet.py deleted file mode 100644 index 3e5998e9153afb6e68ea410d565e00ea835db248..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/modules/seanet.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import numpy as np -import torch.nn as nn - -from .conv import StreamableConv1d, StreamableConvTranspose1d -from .lstm import StreamableLSTM - - -class SEANetResnetBlock(nn.Module): - """Residual block from SEANet model. - - Args: - dim (int): Dimension of the input/output. - kernel_sizes (list): List of kernel sizes for the convolutions. - dilations (list): List of dilations for the convolutions. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection. 
- """ - def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1], - activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False, - pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True): - super().__init__() - assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations' - act = getattr(nn, activation) - hidden = dim // compress - block = [] - for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): - in_chs = dim if i == 0 else hidden - out_chs = dim if i == len(kernel_sizes) - 1 else hidden - block += [ - act(**activation_params), - StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation, - norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - self.block = nn.Sequential(*block) - self.shortcut: nn.Module - if true_skip: - self.shortcut = nn.Identity() - else: - self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode) - - def forward(self, x): - return self.shortcut(x) + self.block(x) - - -class SEANetEncoder(nn.Module): - """SEANet encoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of - upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here - that must match the decoder order. We use the decoder order as some models may only employ the decoder. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. - residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the encoder, it corresponds to the N first blocks. 
- """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0): - super().__init__() - self.channels = channels - self.dimension = dimension - self.n_filters = n_filters - self.ratios = list(reversed(ratios)) - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." - - act = getattr(nn, activation) - mult = 1 - model: tp.List[nn.Module] = [ - StreamableConv1d(channels, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Downsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - norm=block_norm, norm_params=norm_params, - activation=activation, activation_params=activation_params, - causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - # Add downsampling layers - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, mult * n_filters * 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, pad_mode=pad_mode), - ] - mult *= 2 - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - model += [ - act(**activation_params), - StreamableConv1d(mult * n_filters, dimension, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - self.model = nn.Sequential(*model) - - def forward(self, x): - return self.model(x) - - -class SEANetDecoder(nn.Module): - """SEANet decoder. - - Args: - channels (int): Audio channels. - dimension (int): Intermediate representation dimension. - n_filters (int): Base width for the model. - n_residual_layers (int): nb of residual layers. - ratios (Sequence[int]): kernel size and stride ratios. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - final_activation (str): Final activation function after all convolutions. - final_activation_params (dict): Parameters to provide to the activation function. - norm (str): Normalization method. - norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. - kernel_size (int): Kernel size for the initial convolution. - last_kernel_size (int): Kernel size for the initial convolution. 
- residual_kernel_size (int): Kernel size for the residual layers. - dilation_base (int): How much to increase the dilation with each layer. - causal (bool): Whether to use fully causal convolution. - pad_mode (str): Padding mode for the convolutions. - true_skip (bool): Whether to use true skip connection or a simple. - (streamable) convolution as the skip connection in the residual network blocks. - compress (int): Reduced dimensionality in residual branches (from Demucs v3). - lstm (int): Number of LSTM layers at the end of the encoder. - disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. - For the decoder, it corresponds to the N last blocks. - trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. - If equal to 1.0, it means that all the trimming is done at the right. - """ - def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, - ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, - final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None, - norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, - last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, - pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, - disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0): - super().__init__() - self.dimension = dimension - self.channels = channels - self.n_filters = n_filters - self.ratios = ratios - del ratios - self.n_residual_layers = n_residual_layers - self.hop_length = np.prod(self.ratios) - self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks - self.disable_norm_outer_blocks = disable_norm_outer_blocks - assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ - "Number of blocks for which to disable norm is invalid." \ - "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." 
- - act = getattr(nn, activation) - mult = int(2 ** len(self.ratios)) - model: tp.List[nn.Module] = [ - StreamableConv1d(dimension, mult * n_filters, kernel_size, - norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - - if lstm: - model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] - - # Upsample to raw audio scale - for i, ratio in enumerate(self.ratios): - block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm - # Add upsampling layers - model += [ - act(**activation_params), - StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2, - kernel_size=ratio * 2, stride=ratio, - norm=block_norm, norm_kwargs=norm_params, - causal=causal, trim_right_ratio=trim_right_ratio), - ] - # Add residual layers - for j in range(n_residual_layers): - model += [ - SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1], - dilations=[dilation_base ** j, 1], - activation=activation, activation_params=activation_params, - norm=block_norm, norm_params=norm_params, causal=causal, - pad_mode=pad_mode, compress=compress, true_skip=true_skip)] - - mult //= 2 - - # Add final layers - model += [ - act(**activation_params), - StreamableConv1d(n_filters, channels, last_kernel_size, - norm='none' if self.disable_norm_outer_blocks >= 1 else norm, - norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) - ] - # Add optional final activation to decoder (eg. tanh) - if final_activation is not None: - final_act = getattr(nn, final_activation) - final_activation_params = final_activation_params or {} - model += [ - final_act(**final_activation_params) - ] - self.model = nn.Sequential(*model) - - def forward(self, z): - y = self.model(z) - return y diff --git a/spaces/trungtruc/segment_clothes/app.py b/spaces/trungtruc/segment_clothes/app.py deleted file mode 100644 index a61a80d2afb644d55e6f3da4b5e4dd0c2a125ed9..0000000000000000000000000000000000000000 --- a/spaces/trungtruc/segment_clothes/app.py +++ /dev/null @@ -1,81 +0,0 @@ -import gradio as gr -from inference import load_seg_model, get_palette, generate_mask -import cv2 -import numpy as np -import os - -device = 'cpu' - - -def algorithm_improve_seg(image, mode_expansion, mode_smoothing, factor_kernel: int = 5, smoothing: bool = True, - factor_smooth: int = 5): - # image = cv2.imread(image) - assert image is not None, "file could not be read, check with os.path.exists()" - kernel = np.ones((factor_kernel, factor_kernel), np.uint8) - - remove_noise_bg_img = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel) - - output = remove_noise_bg_img - - if mode_expansion == "erosion": - output = cv2.erode(remove_noise_bg_img, kernel, iterations=1) - elif mode_expansion == "dilate": - output = cv2.dilate(remove_noise_bg_img, kernel, iterations=1) - else: - pass - if smoothing is True: - if mode_smoothing == "blur": - output = cv2.blur(output, (factor_smooth, factor_smooth)) - elif mode_smoothing == "gaussian blur": - output = cv2.GaussianBlur(output, (factor_smooth, factor_smooth), 0) - elif mode_smoothing == "median blur": - output = cv2.medianBlur(output, 5) - else: - pass - return output - - -def initialize_and_load_models(): - checkpoint_path = 'model/unet_cloth_seg.pth' - net = load_seg_model(checkpoint_path, device=device) - return net - - -net = initialize_and_load_models() -palette = get_palette(4) - - -def run(image, mode_expansion, mode_smoothing, factor_kernel: int = 5, smoothing: bool = True, 
factor_smooth: int = 5): - cloth_seg = generate_mask(image, net=net, palette=palette, device=device) - output = algorithm_improve_seg(cloth_seg, mode_expansion=mode_expansion, mode_smoothing=mode_smoothing, - factor_kernel=factor_kernel, - smoothing=smoothing, factor_smooth=factor_smooth) - return output - - -# -----------------DEMO USE INTERFACE GRADIO --------------------------------------------------# -# input_image = gr.Image(source='upload', type="numpy") -input_image = gr.inputs.Image(label="Input Image", type="pil") -with gr.Accordion("Advanced options", open=False): - mode_expansion = gr.Dropdown(["erode", "dilate"], label="Expansion", - info="dilation or erosion of clothes segmentation.", value="dilate") - factor_kernel = gr.Slider(label="Control factor strength", minimum=1, maximum=6, value=5, step=1) - mode_smoothing = gr.Dropdown(["blur", "gaussian blur", "median blur"], label="Smoothing", - info=" smoothing image and remove anti-aliaing.", value="gaussian blur") - smoothing = gr.Checkbox(label="Smoothing mode", value=True) - factor_smooth = gr.Slider(label="Control factor smooth", minimum=1, maximum=7, value=5, step=1) - -result_gallery = gr.outputs.Image(label="Cloth Segmentation", type="pil") - -ips = [input_image, mode_expansion, mode_smoothing, factor_kernel, smoothing, factor_smooth] -outputs = [result_gallery] - -title = "Demo for Garment Segmentation" -description = "

              This is demo for clothes segmentation and used algorithm remove noise and anti-aliasing to improve quality. Make by Trung Truc

              " - -gr.Interface(fn=run, inputs=ips, outputs=outputs, title=title, description=description, examples=[ - [os.path.join(os.path.dirname(__file__), "input/img1.jpg")], - [os.path.join(os.path.dirname(__file__), "input/img2.jpg")], - [os.path.join(os.path.dirname(__file__), "input/img3.jpg")], - [os.path.join(os.path.dirname(__file__), "input/img4.jpeg")], -]).launch(share=True) diff --git a/spaces/ulysses115/diffsvc_test/network/vocoders/__init__.py b/spaces/ulysses115/diffsvc_test/network/vocoders/__init__.py deleted file mode 100644 index 6631bafa406a3e3add4903f3e7a11957d416a78f..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/diffsvc_test/network/vocoders/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from network.vocoders import hifigan -from network.vocoders import nsf_hifigan diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed 3 Tyranny Of King Washington Uplay Crack Extra Quality.md b/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed 3 Tyranny Of King Washington Uplay Crack Extra Quality.md deleted file mode 100644 index 28ca96ed7bf7b296904db98f4aba481826cf3067..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Assassins Creed 3 Tyranny Of King Washington Uplay Crack Extra Quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

-assassins creed 3 tyranny of king washington uplay crack
-Download ✓✓✓ https://urlcod.com/2uyWr0
-Download Assassins Creed III Tyranny of King Washington Free Game Full ... Assassin`s Creed 3 - Tyranny Of King Washington Keygen & Crack (Free Download) ... Ubisoft said that when Rockstar's Red Dead Redemption released midway ... 1fdad05405
              diff --git a/spaces/user238921933/stable-diffusion-webui/style.css b/spaces/user238921933/stable-diffusion-webui/style.css deleted file mode 100644 index 8f58bc9d0cba1f6ec371f211f531530245bf367e..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/style.css +++ /dev/null @@ -1,961 +0,0 @@ -.container { - max-width: 100%; -} - -.token-counter{ - position: absolute; - display: inline-block; - right: 2em; - min-width: 0 !important; - width: auto; - z-index: 100; -} - -.token-counter.error span{ - box-shadow: 0 0 0.0 0.3em rgba(255,0,0,0.15), inset 0 0 0.6em rgba(255,0,0,0.075); - border: 2px solid rgba(255,0,0,0.4) !important; -} - -.token-counter div{ - display: inline; -} - -.token-counter span{ - padding: 0.1em 0.75em; -} - -#sh{ - min-width: 2em; - min-height: 2em; - max-width: 2em; - max-height: 2em; - flex-grow: 0; - padding-left: 0.25em; - padding-right: 0.25em; - margin: 0.1em 0; - opacity: 0%; - cursor: default; -} - -.output-html p {margin: 0 0.5em;} - -.row > *, -.row > .gr-form > * { - min-width: min(120px, 100%); - flex: 1 1 0%; -} - -.performance { - font-size: 0.85em; - color: #444; -} - -.performance p{ - display: inline-block; -} - -.performance .time { - margin-right: 0; -} - -.performance .vram { -} - -#txt2img_generate, #img2img_generate { - min-height: 4.5em; -} - -@media screen and (min-width: 2500px) { - #txt2img_gallery, #img2img_gallery { - min-height: 768px; - } -} - -#txt2img_gallery img, #img2img_gallery img{ - object-fit: scale-down; -} -#txt2img_actions_column, #img2img_actions_column { - margin: 0.35rem 0.75rem 0.35rem 0; -} -#script_list { - padding: .625rem .75rem 0 .625rem; -} -.justify-center.overflow-x-scroll { - justify-content: left; -} - -.justify-center.overflow-x-scroll button:first-of-type { - margin-left: auto; -} - -.justify-center.overflow-x-scroll button:last-of-type { - margin-right: auto; -} - -[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ - min-width: 2.3em; - height: 2.5em; - flex-grow: 0; - padding-left: 0.25em; - padding-right: 0.25em; -} - -#hidden_element{ - display: none; -} - -[id$=_seed_row], [id$=_subseed_row]{ - gap: 0.5rem; - padding: 0.6em; -} - -[id$=_subseed_show_box]{ - min-width: auto; - flex-grow: 0; -} - -[id$=_subseed_show_box] > div{ - border: 0; - height: 100%; -} - -[id$=_subseed_show]{ - min-width: auto; - flex-grow: 0; - padding: 0; -} - -[id$=_subseed_show] label{ - height: 100%; -} - -#txt2img_actions_column, #img2img_actions_column{ - gap: 0; - margin-right: .75rem; -} - -#txt2img_tools, #img2img_tools{ - gap: 0.4em; -} - -#interrogate_col{ - min-width: 0 !important; - max-width: 8em !important; - margin-right: 1em; - gap: 0; -} -#interrogate, #deepbooru{ - margin: 0em 0.25em 0.5em 0.25em; - min-width: 8em; - max-width: 8em; -} - -#style_pos_col, #style_neg_col{ - min-width: 8em !important; -} - -#txt2img_styles_row, #img2img_styles_row{ - gap: 0.25em; - margin-top: 0.3em; -} - -#txt2img_styles_row > button, #img2img_styles_row > button{ - margin: 0; -} - -#txt2img_styles, #img2img_styles{ - padding: 0; -} - -#txt2img_styles > label > div, #img2img_styles > label > div{ - min-height: 3.2em; -} - -ul.list-none{ - max-height: 35em; - z-index: 2000; -} - -.gr-form{ - background: transparent; -} - -.my-4{ - margin-top: 0; - margin-bottom: 0; -} - -#resize_mode{ - flex: 1.5; -} - -button{ - align-self: stretch !important; -} - -.overflow-hidden, .gr-panel{ - overflow: visible !important; -} - -#x_type, #y_type{ - 
max-width: 10em; -} - -#txt2img_preview, #img2img_preview, #ti_preview{ - position: absolute; - width: 320px; - left: 0; - right: 0; - margin-left: auto; - margin-right: auto; - margin-top: 34px; - z-index: 100; - border: none; - border-top-left-radius: 0; - border-top-right-radius: 0; -} - -@media screen and (min-width: 768px) { - #txt2img_preview, #img2img_preview, #ti_preview { - position: absolute; - } -} - -@media screen and (max-width: 767px) { - #txt2img_preview, #img2img_preview, #ti_preview { - position: relative; - } -} - -#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0, #ti_preview div.left-0.top-0{ - display: none; -} - -fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ - position: absolute; - top: -0.7em; - line-height: 1.2em; - padding: 0; - margin: 0 0.5em; - - background-color: white; - box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white; - - z-index: 300; -} - -.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ - background-color: rgb(31, 41, 55); - box-shadow: none; - border: 1px solid rgba(128, 128, 128, 0.1); - border-radius: 6px; - padding: 0.1em 0.5em; -} - -#txt2img_column_batch, #img2img_column_batch{ - min-width: min(13.5em, 100%) !important; -} - -#settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{ - position: relative; - border: none; - margin-right: 8em; -} - -#settings .gr-panel div.flex-col div.justify-between div{ - position: relative; - z-index: 200; -} - -#settings{ - display: block; -} - -#settings > div{ - border: none; - margin-left: 10em; -} - -#settings > div.flex-wrap{ - float: left; - display: block; - margin-left: 0; - width: 10em; -} - -#settings > div.flex-wrap button{ - display: block; - border: none; - text-align: left; -} - -#settings_result{ - height: 1.4em; - margin: 0 1.2em; -} - -input[type="range"]{ - margin: 0.5em 0 -0.3em 0; -} - -#mask_bug_info { - text-align: center; - display: block; - margin-top: -0.75em; - margin-bottom: -0.75em; -} - -#txt2img_negative_prompt, #img2img_negative_prompt{ -} - -/* gradio 3.8 adds opacity to progressbar which makes it blink; disable it here */ -.transition.opacity-20 { - opacity: 1 !important; -} - -/* more gradio's garbage cleanup */ -.min-h-\[4rem\] { min-height: unset !important; } -.min-h-\[6rem\] { min-height: unset !important; } - -.progressDiv{ - position: relative; - height: 20px; - background: #b4c0cc; - border-radius: 3px !important; - margin-bottom: -3px; -} - -.dark .progressDiv{ - background: #424c5b; -} - -.progressDiv .progress{ - width: 0%; - height: 20px; - background: #0060df; - color: white; - font-weight: bold; - line-height: 20px; - padding: 0 8px 0 0; - text-align: right; - border-radius: 3px; - overflow: visible; - white-space: nowrap; - padding: 0 0.5em; -} - -.livePreview{ - position: absolute; - z-index: 300; - background-color: white; - margin: -4px; -} - -.dark .livePreview{ - background-color: rgb(17 24 39 / var(--tw-bg-opacity)); -} - -.livePreview img{ - position: absolute; - object-fit: contain; - width: 100%; - height: 100%; -} - -#lightboxModal{ - display: none; - position: fixed; - z-index: 1001; - padding-top: 100px; - left: 0; - top: 0; - width: 100%; - height: 100%; - overflow: auto; - background-color: rgba(20, 20, 20, 0.95); - user-select: none; - -webkit-user-select: none; -} - -.modalControls { - display: grid; - grid-template-columns: 32px 32px 32px 1fr 32px; - grid-template-areas: "zoom tile 
save space close"; - position: absolute; - top: 0; - left: 0; - right: 0; - padding: 16px; - gap: 16px; - background-color: rgba(0,0,0,0.2); -} - -.modalClose { - grid-area: close; -} - -.modalZoom { - grid-area: zoom; -} - -.modalSave { - grid-area: save; -} - -.modalTileImage { - grid-area: tile; -} - -.modalClose, -.modalZoom, -.modalTileImage { - color: white; - font-size: 35px; - font-weight: bold; - cursor: pointer; -} - -.modalSave { - color: white; - font-size: 28px; - margin-top: 8px; - font-weight: bold; - cursor: pointer; -} - -.modalClose:hover, -.modalClose:focus, -.modalSave:hover, -.modalSave:focus, -.modalZoom:hover, -.modalZoom:focus { - color: #999; - text-decoration: none; - cursor: pointer; -} - -#modalImage { - display: block; - margin-left: auto; - margin-right: auto; - margin-top: auto; - width: auto; -} - -.modalImageFullscreen { - object-fit: contain; - height: 90%; -} - -.modalPrev, -.modalNext { - cursor: pointer; - position: absolute; - top: 50%; - width: auto; - padding: 16px; - margin-top: -50px; - color: white; - font-weight: bold; - font-size: 20px; - transition: 0.6s ease; - border-radius: 0 3px 3px 0; - user-select: none; - -webkit-user-select: none; -} - -.modalNext { - right: 0; - border-radius: 3px 0 0 3px; -} - -.modalPrev:hover, -.modalNext:hover { - background-color: rgba(0, 0, 0, 0.8); -} - -#imageARPreview{ - position:absolute; - top:0px; - left:0px; - border:2px solid red; - background:rgba(255, 0, 0, 0.3); - z-index: 900; - pointer-events:none; - display:none -} - -#txt2img_generate_box, #img2img_generate_box{ - position: relative; -} - -#txt2img_interrupt, #img2img_interrupt, #txt2img_skip, #img2img_skip{ - position: absolute; - width: 50%; - height: 100%; - background: #b4c0cc; - display: none; -} - -#txt2img_interrupt, #img2img_interrupt{ - left: 0; - border-radius: 0.5rem 0 0 0.5rem; -} -#txt2img_skip, #img2img_skip{ - right: 0; - border-radius: 0 0.5rem 0.5rem 0; -} - -.red { - color: red; -} - -.gallery-item { - --tw-bg-opacity: 0 !important; -} - -#context-menu{ - z-index:9999; - position:absolute; - display:block; - padding:0px 0; - border:2px solid #a55000; - border-radius:8px; - box-shadow:1px 1px 2px #CE6400; - width: 200px; -} - -.context-menu-items{ - list-style: none; - margin: 0; - padding: 0; -} - -.context-menu-items a{ - display:block; - padding:5px; - cursor:pointer; -} - -.context-menu-items a:hover{ - background: #a55000; -} - -#quicksettings { - width: fit-content; -} - -#quicksettings > div, #quicksettings > fieldset{ - max-width: 24em; - min-width: 24em; - padding: 0; - border: none; - box-shadow: none; - background: none; - margin-right: 10px; -} - -#quicksettings > div > div > div > label > span { - position: relative; - margin-right: 9em; - margin-bottom: -1em; -} - -canvas[key="mask"] { - z-index: 12 !important; - filter: invert(); - mix-blend-mode: multiply; - pointer-events: none; -} - - -/* gradio 3.4.1 stuff for editable scrollbar values */ -.gr-box > div > div > input.gr-text-input{ - position: absolute; - right: 0.5em; - top: -0.6em; - z-index: 400; - width: 6em; -} -#quicksettings .gr-box > div > div > input.gr-text-input { - top: -1.12em; -} - -.row.gr-compact{ - overflow: visible; -} - -#img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img, -#img2img_sketch, #img2img_sketch > .h-60, #img2img_sketch > .h-60 > div, #img2img_sketch > .h-60 > div > img, -#img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img, 
-#inpaint_sketch, #inpaint_sketch > .h-60, #inpaint_sketch > .h-60 > div, #inpaint_sketch > .h-60 > div > img -{ - height: 480px !important; - max-height: 480px !important; - min-height: 480px !important; -} - -/* Extensions */ - -#tab_extensions table{ - border-collapse: collapse; -} - -#tab_extensions table td, #tab_extensions table th{ - border: 1px solid #ccc; - padding: 0.25em 0.5em; -} - -#tab_extensions table input[type="checkbox"]{ - margin-right: 0.5em; -} - -#tab_extensions button{ - max-width: 16em; -} - -#tab_extensions input[disabled="disabled"]{ - opacity: 0.5; -} - -.extension-tag{ - font-weight: bold; - font-size: 95%; -} - -#available_extensions .info{ - margin: 0; -} - -#available_extensions .date_added{ - opacity: 0.85; - font-size: 90%; -} - -#image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{ - min-width: auto; - padding-left: 0.5em; - padding-right: 0.5em; -} - -.gr-form{ - background-color: white; -} - -.dark .gr-form{ - background-color: rgb(31 41 55 / var(--tw-bg-opacity)); -} - -.gr-button-tool, .gr-button-tool-top{ - max-width: 2.5em; - min-width: 2.5em !important; - height: 2.4em; -} - -.gr-button-tool{ - margin: 0.6em 0em 0.55em 0; -} - -.gr-button-tool-top, #settings .gr-button-tool{ - margin: 1.6em 0.7em 0.55em 0; -} - - -#modelmerger_results_container{ - margin-top: 1em; - overflow: visible; -} - -#modelmerger_models{ - gap: 0; -} - - -#quicksettings .gr-button-tool{ - margin: 0; - border-color: unset; - background-color: unset; -} - -#modelmerger_interp_description>p { - margin: 0!important; - text-align: center; -} -#modelmerger_interp_description { - margin: 0.35rem 0.75rem 1.23rem; -} -#img2img_settings > div.gr-form, #txt2img_settings > div.gr-form { - padding-top: 0.9em; - padding-bottom: 0.9em; -} -#txt2img_settings { - padding-top: 1.16em; - padding-bottom: 0.9em; -} -#img2img_settings { - padding-bottom: 0.9em; -} - -#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{ - border: none; - padding-bottom: 0.5em; -} - -footer { - display: none !important; -} - -#footer{ - text-align: center; -} - -#footer div{ - display: inline-block; -} - -#footer .versions{ - font-size: 85%; - opacity: 0.85; -} - -#txtimg_hr_finalres{ - min-height: 0 !important; - padding: .625rem .75rem; - margin-left: -0.75em - -} - -#txtimg_hr_finalres .resolution{ - font-weight: bold; -} - -#txt2img_checkboxes, #img2img_checkboxes{ - margin-bottom: 0.5em; - margin-left: 0em; -} -#txt2img_checkboxes > div, #img2img_checkboxes > div{ - flex: 0; - white-space: nowrap; - min-width: auto; -} - -#img2img_copy_to_img2img, #img2img_copy_to_sketch, #img2img_copy_to_inpaint, #img2img_copy_to_inpaint_sketch{ - margin-left: 0em; -} - -#axis_options { - margin-left: 0em; -} - -.inactive{ - opacity: 0.5; -} - -[id*='_prompt_container']{ - gap: 0; -} - -[id*='_prompt_container'] > div{ - margin: -0.4em 0 0 0; -} - -.gr-compact { - border: none; -} - -.dark .gr-compact{ - background-color: rgb(31 41 55 / var(--tw-bg-opacity)); - margin-left: 0; -} - -.gr-compact{ - overflow: visible; -} - -.gr-compact > *{ -} - -.gr-compact .gr-block, .gr-compact .gr-form{ - border: none; - box-shadow: none; -} - -.gr-compact .gr-box{ - border-radius: .5rem !important; - border-width: 1px !important; -} - -#mode_img2img > div > div{ - gap: 0 !important; -} - -[id*='img2img_copy_to_'] { - border: none; -} - -[id*='img2img_copy_to_'] > button { -} - -[id*='img2img_label_copy_to_'] { - font-size: 1.0em; - 
font-weight: bold; - text-align: center; - line-height: 2.4em; -} - -.extra-networks > div > [id *= '_extra_']{ - margin: 0.3em; -} - -.extra-network-subdirs{ - padding: 0.2em 0.35em; -} - -.extra-network-subdirs button{ - margin: 0 0.15em; -} - -#txt2img_extra_networks .search, #img2img_extra_networks .search{ - display: inline-block; - max-width: 16em; - margin: 0.3em; - align-self: center; -} - -#txt2img_extra_view, #img2img_extra_view { - width: auto; -} - -.extra-network-cards .nocards, .extra-network-thumbs .nocards{ - margin: 1.25em 0.5em 0.5em 0.5em; -} - -.extra-network-cards .nocards h1, .extra-network-thumbs .nocards h1{ - font-size: 1.5em; - margin-bottom: 1em; -} - -.extra-network-cards .nocards li, .extra-network-thumbs .nocards li{ - margin-left: 0.5em; -} - -.extra-network-thumbs { - display: flex; - flex-flow: row wrap; - gap: 10px; -} - -.extra-network-thumbs .card { - height: 6em; - width: 6em; - cursor: pointer; - background-image: url('./file=html/card-no-preview.png'); - background-size: cover; - background-position: center center; - position: relative; -} - -.extra-network-thumbs .card:hover .additional a { - display: block; -} - -.extra-network-thumbs .actions .additional a { - background-image: url('./file=html/image-update.svg'); - background-repeat: no-repeat; - background-size: cover; - background-position: center center; - position: absolute; - top: 0; - left: 0; - width: 24px; - height: 24px; - display: none; - font-size: 0; - text-align: -9999; -} - -.extra-network-thumbs .actions .name { - position: absolute; - bottom: 0; - font-size: 10px; - padding: 3px; - width: 100%; - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; - background: rgba(0,0,0,.5); - color: white; -} - -.extra-network-thumbs .card:hover .actions .name { - white-space: normal; - word-break: break-all; -} - -.extra-network-cards .card{ - display: inline-block; - margin: 0.5em; - width: 16em; - height: 24em; - box-shadow: 0 0 5px rgba(128, 128, 128, 0.5); - border-radius: 0.2em; - position: relative; - - background-size: auto 100%; - background-position: center; - overflow: hidden; - cursor: pointer; - - background-image: url('./file=html/card-no-preview.png') -} - -.extra-network-cards .card:hover{ - box-shadow: 0 0 2px 0.3em rgba(0, 128, 255, 0.35); -} - -.extra-network-cards .card .actions .additional{ - display: none; -} - -.extra-network-cards .card .actions{ - position: absolute; - bottom: 0; - left: 0; - right: 0; - padding: 0.5em; - color: white; - background: rgba(0,0,0,0.5); - box-shadow: 0 0 0.25em 0.25em rgba(0,0,0,0.5); - text-shadow: 0 0 0.2em black; -} - -.extra-network-cards .card .actions:hover{ - box-shadow: 0 0 0.75em 0.75em rgba(0,0,0,0.5) !important; -} - -.extra-network-cards .card .actions .name{ - font-size: 1.7em; - font-weight: bold; - line-break: anywhere; -} - -.extra-network-cards .card .actions:hover .additional{ - display: block; -} - -.extra-network-cards .card ul{ - margin: 0.25em 0 0.75em 0.25em; - cursor: unset; -} - -.extra-network-cards .card ul a{ - cursor: pointer; -} - -.extra-network-cards .card ul a:hover{ - color: red; -} - -[id*='_prompt_container'] > div { - margin: 0!important; -} diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/fp16_utils.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/fp16_utils.py deleted file mode 100644 index 1981011d6859192e3e663e29d13500d56ba47f6c..0000000000000000000000000000000000000000 --- 
a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/fp16_utils.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools -import warnings -from collections import abc -from inspect import getfullargspec - -import numpy as np -import torch -import torch.nn as nn - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from .dist_utils import allreduce_grads as _allreduce_grads - -try: - # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported - # and used; otherwise, auto fp16 will adopt mmcv's implementation. - # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 - # manually, so the behavior may not be consistent with real amp. - from torch.cuda.amp import autocast -except ImportError: - pass - - -def cast_tensor_type(inputs, src_type, dst_type): - """Recursively convert Tensor in inputs from src_type to dst_type. - - Args: - inputs: Inputs that to be casted. - src_type (torch.dtype): Source type.. - dst_type (torch.dtype): Destination type. - - Returns: - The same type with inputs, but all contained Tensors have been cast. - """ - if isinstance(inputs, nn.Module): - return inputs - elif isinstance(inputs, torch.Tensor): - return inputs.to(dst_type) - elif isinstance(inputs, str): - return inputs - elif isinstance(inputs, np.ndarray): - return inputs - elif isinstance(inputs, abc.Mapping): - return type(inputs)({ - k: cast_tensor_type(v, src_type, dst_type) - for k, v in inputs.items() - }) - elif isinstance(inputs, abc.Iterable): - return type(inputs)( - cast_tensor_type(item, src_type, dst_type) for item in inputs) - else: - return inputs - - -def auto_fp16(apply_to=None, out_fp32=False): - """Decorator to enable fp16 training automatically. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If inputs arguments are fp32 tensors, they will - be converted to fp16 automatically. Arguments other than fp32 tensors are - ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp32 (bool): Whether to convert the output back to fp32. - - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp16 - >>> @auto_fp16() - >>> def forward(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp16 - >>> @auto_fp16(apply_to=('pred', )) - >>> def do_something(self, pred, others): - >>> pass - """ - - def auto_fp16_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. 
- if not isinstance(args[0], torch.nn.Module): - raise TypeError('@auto_fp16 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - # NOTE: default args are not taken into consideration - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.float, torch.half)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = {} - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.float, torch.half) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=True): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp32: - output = cast_tensor_type(output, torch.half, torch.float) - return output - - return new_func - - return auto_fp16_wrapper - - -def force_fp32(apply_to=None, out_fp16=False): - """Decorator to convert input arguments to fp32 in force. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If there are some inputs that must be processed - in fp32 mode, then this decorator can handle it. If inputs arguments are - fp16 tensors, they will be converted to fp32 automatically. Arguments other - than fp16 tensors are ignored. If you are using PyTorch >= 1.6, - torch.cuda.amp is used as the backend, otherwise, original mmcv - implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp16 (bool): Whether to convert the output back to fp16. - - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp32 - >>> @force_fp32() - >>> def loss(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp32 - >>> @force_fp32(apply_to=('pred', )) - >>> def post_process(self, pred, others): - >>> pass - """ - - def force_fp32_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. 
- if not isinstance(args[0], torch.nn.Module): - raise TypeError('@force_fp32 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.half, torch.float)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = dict() - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.half, torch.float) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=False): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp16: - output = cast_tensor_type(output, torch.float, torch.half) - return output - - return new_func - - return force_fp32_wrapper - - -def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): - warnings.warning( - '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be ' - 'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads') - _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb) - - -def wrap_fp16_model(model): - """Wrap the FP32 model to FP16. - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - For PyTorch >= 1.6, this function will - 1. Set fp16 flag inside the model to True. - - Otherwise: - 1. Convert FP32 model to FP16. - 2. Remain some necessary layers to be FP32, e.g., normalization layers. - 3. Set `fp16_enabled` flag inside the model to True. - - Args: - model (nn.Module): Model in FP32. - """ - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.6.0')): - # convert model to fp16 - model.half() - # patch the normalization layers to make it work in fp32 mode - patch_norm_fp32(model) - # set `fp16_enabled` flag - for m in model.modules(): - if hasattr(m, 'fp16_enabled'): - m.fp16_enabled = True - - -def patch_norm_fp32(module): - """Recursively convert normalization layers from FP16 to FP32. - - Args: - module (nn.Module): The modules to be converted in FP16. - - Returns: - nn.Module: The converted module, the normalization layers have been - converted to FP32. - """ - if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): - module.float() - if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3': - module.forward = patch_forward_method(module.forward, torch.half, - torch.float) - for child in module.children(): - patch_norm_fp32(child) - return module - - -def patch_forward_method(func, src_type, dst_type, convert_output=True): - """Patch the forward method of a module. - - Args: - func (callable): The original forward method. - src_type (torch.dtype): Type of input arguments to be converted from. 
- dst_type (torch.dtype): Type of input arguments to be converted to. - convert_output (bool): Whether to convert the output back to src_type. - - Returns: - callable: The patched forward method. - """ - - def new_forward(*args, **kwargs): - output = func(*cast_tensor_type(args, src_type, dst_type), - **cast_tensor_type(kwargs, src_type, dst_type)) - if convert_output: - output = cast_tensor_type(output, dst_type, src_type) - return output - - return new_forward - - -class LossScaler: - """Class that manages loss scaling in mixed precision training which - supports both dynamic or static mode. - - The implementation refers to - https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. - Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. - It's important to understand how :class:`LossScaler` operates. - Loss scaling is designed to combat the problem of underflowing - gradients encountered at long times when training fp16 networks. - Dynamic loss scaling begins by attempting a very high loss - scale. Ironically, this may result in OVERflowing gradients. - If overflowing gradients are encountered, :class:`FP16_Optimizer` then - skips the update step for this particular iteration/minibatch, - and :class:`LossScaler` adjusts the loss scale to a lower value. - If a certain number of iterations occur without overflowing gradients - detected,:class:`LossScaler` increases the loss scale once more. - In this way :class:`LossScaler` attempts to "ride the edge" of always - using the highest loss scale possible without incurring overflow. - - Args: - init_scale (float): Initial loss scale value, default: 2**32. - scale_factor (float): Factor used when adjusting the loss scale. - Default: 2. - mode (str): Loss scaling mode. 'dynamic' or 'static' - scale_window (int): Number of consecutive iterations without an - overflow to wait before increasing the loss scale. Default: 1000. 
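# Illustrative aside, not part of the original file: a minimal usage sketch of
# the dynamic mode described above, assuming the LossScaler class being defined
# here with its default scale_factor=2 and scale_window=1000.
scaler = LossScaler(init_scale=2 ** 16, mode='dynamic')
scaler.update_scale(overflow=True)       # overflow detected: scale halves to 2 ** 15
for _ in range(1000):                    # 1000 consecutive clean iterations ...
    scaler.update_scale(overflow=False)
print(scaler.loss_scale)                 # ... and the scale doubles back to 2 ** 16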
- """ - - def __init__(self, - init_scale=2**32, - mode='dynamic', - scale_factor=2., - scale_window=1000): - self.cur_scale = init_scale - self.cur_iter = 0 - assert mode in ('dynamic', - 'static'), 'mode can only be dynamic or static' - self.mode = mode - self.last_overflow_iter = -1 - self.scale_factor = scale_factor - self.scale_window = scale_window - - def has_overflow(self, params): - """Check if params contain overflow.""" - if self.mode != 'dynamic': - return False - for p in params: - if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): - return True - return False - - def _has_inf_or_nan(x): - """Check if params contain NaN.""" - try: - cpu_sum = float(x.float().sum()) - except RuntimeError as instance: - if 'value cannot be converted' not in instance.args[0]: - raise - return True - else: - if cpu_sum == float('inf') or cpu_sum == -float('inf') \ - or cpu_sum != cpu_sum: - return True - return False - - def update_scale(self, overflow): - """update the current loss scale value when overflow happens.""" - if self.mode != 'dynamic': - return - if overflow: - self.cur_scale = max(self.cur_scale / self.scale_factor, 1) - self.last_overflow_iter = self.cur_iter - else: - if (self.cur_iter - self.last_overflow_iter) % \ - self.scale_window == 0: - self.cur_scale *= self.scale_factor - self.cur_iter += 1 - - def state_dict(self): - """Returns the state of the scaler as a :class:`dict`.""" - return dict( - cur_scale=self.cur_scale, - cur_iter=self.cur_iter, - mode=self.mode, - last_overflow_iter=self.last_overflow_iter, - scale_factor=self.scale_factor, - scale_window=self.scale_window) - - def load_state_dict(self, state_dict): - """Loads the loss_scaler state dict. - - Args: - state_dict (dict): scaler state. - """ - self.cur_scale = state_dict['cur_scale'] - self.cur_iter = state_dict['cur_iter'] - self.mode = state_dict['mode'] - self.last_overflow_iter = state_dict['last_overflow_iter'] - self.scale_factor = state_dict['scale_factor'] - self.scale_window = state_dict['scale_window'] - - @property - def loss_scale(self): - return self.cur_scale diff --git a/spaces/w1zrd/MusicGen/tests/common_utils/temp_utils.py b/spaces/w1zrd/MusicGen/tests/common_utils/temp_utils.py deleted file mode 100644 index d1e0367e979c8b9fea65472c373916d956ad5aaa..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/tests/common_utils/temp_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import os -import tempfile - - -class TempDirMixin: - """Mixin to provide easy access to temp dir. - """ - - temp_dir_ = None - - @classmethod - def get_base_temp_dir(cls): - # If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory. - # this is handy for debugging. - key = "AUDIOCRAFT_TEST_DIR" - if key in os.environ: - return os.environ[key] - if cls.temp_dir_ is None: - cls.temp_dir_ = tempfile.TemporaryDirectory() - return cls.temp_dir_.name - - @classmethod - def tearDownClass(cls): - if cls.temp_dir_ is not None: - try: - cls.temp_dir_.cleanup() - cls.temp_dir_ = None - except PermissionError: - # On Windows there is a know issue with `shutil.rmtree`, - # which fails intermittenly. - # https://github.com/python/cpython/issues/74168 - # Following the above thread, we ignore it. 
- pass - super().tearDownClass() - - @property - def id(self): - return self.__class__.__name__ - - def get_temp_path(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(os.path.dirname(path), exist_ok=True) - return path - - def get_temp_dir(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(path, exist_ok=True) - return path diff --git a/spaces/w1zrd/MusicGen/tests/models/test_musicgen.py b/spaces/w1zrd/MusicGen/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) - return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) 
- wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/webshop/amazon_shop/predict_help.py b/spaces/webshop/amazon_shop/predict_help.py deleted file mode 100644 index a19e5e791b049431774ce2340a67c1a68694234d..0000000000000000000000000000000000000000 --- a/spaces/webshop/amazon_shop/predict_help.py +++ /dev/null @@ -1,456 +0,0 @@ -from bs4 import BeautifulSoup -from bs4.element import Comment -from enum import Enum -import re, time -from urllib.parse import urlencode - -import json, requests, torch - -class Page(Enum): - DESC = "description" - FEATURES = "features" - ITEM_PAGE = "item_page" - RESULTS = "results" - REVIEWS = "reviews" - SEARCH = "search" - SUB_PAGE = "item_sub_page" - -HEADER_ = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Safari/537.36' -DEBUG_HTML = "temp.html" -NUM_PROD_LIMIT = 10 - -WEBSHOP_URL = "http://3.83.245.205:3000" -WEBSHOP_SESSION = "abc" - -def parse_results_ebay(query, page_num=None, verbose=True): - query_string = '+'.join(query.split()) - page_num = 1 if page_num is None else page_num - url = f'https://www.ebay.com/sch/i.html?_nkw={query_string}&_pgn={page_num}' - if verbose: - print(f"Search Results URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.text, 'html.parser') - products = soup.select('.s-item__wrapper.clearfix') - - results = [] - for item in products[:NUM_PROD_LIMIT]: - title = item.select_one('.s-item__title').text.strip() - if "shop on ebay" in title.lower(): - # Skip "Shop on ebay" product title - continue - link = item.select_one('.s-item__link')['href'] - asin = link.split("?")[0][len("https://www.ebay.com/itm/"):] - - try: - price = item.select_one('.s-item__price').text - if "to" in price: - prices = price.split(" to ") - price = [p.strip("$") for p in prices] - except: - price = None - - results.append({ - "asin": asin, - "Title": title, - "Price": price - }) - if verbose: - print(f"Scraped {len(results)} products") - return results - - -def parse_item_page_ebay(asin, verbose=True): - product_dict = {} - product_dict["asin"] = asin - - url = f"https://www.ebay.com/itm/{asin}" - if verbose: - print(f"Item Page URL: {url}") - begin = time.time() - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - end = time.time() - if verbose: - print(f"Item page scraping took {end-begin} seconds") - soup = BeautifulSoup(webpage.content, "html.parser") - - # Title - try: - product_dict["Title"] = soup.find('h1', {'class': 'x-item-title__mainTitle'}).text.strip() - except: - product_dict["Title"] = "N/A" - - # Price: Get price string, extract decimal numbers from string - try: - price_str = soup.find('div', {'class': 'mainPrice'}).text - prices = re.findall('\d*\.?\d+', price_str) - product_dict["Price"] = prices[0] - except: - product_dict["Price"] = "N/A" - - # Main Image - try: - img_div = soup.find('div', {'id': 'mainImgHldr'}) - img_link = img_div.find('img', {'id': 'icImg'})["src"] - product_dict["MainImage"] = img_link - except: - product_dict["MainImage"] = "" - - # Rating - try: - rating = soup.find('span', {'class': 'reviews-star-rating'})["title"].split()[0] - except: - rating = None - product_dict["Rating"] = rating - - # Options - options, options_to_images = {}, {} # TODO: options_to_images possible? 
- try: - option_blocks = soup.findAll('select', {'class': 'msku-sel'}) - for block in option_blocks: - name = block["name"].strip().strip(":") - option_tags = block.findAll("option") - opt_list = [] - for option_tag in option_tags: - if "select" not in option_tag.text.lower(): - # Do not include "- select -" (aka `not selected`) choice - opt_list.append(option_tag.text) - options[name] = opt_list - except: - options = {} - product_dict["options"], product_dict["option_to_image"] = options, options_to_images - - # Description - desc = None - try: - # Ebay descriptions are shown in `iframe`s - desc_link = soup.find('iframe', {'id': 'desc_ifr'})["src"] - desc_webpage = requests.get(desc_link, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - desc_soup = BeautifulSoup(desc_webpage.content, "html.parser") - desc = ' '.join(desc_soup.text.split()) - except: - desc = "N/A" - product_dict["Description"] = desc - - # Features - features = None - try: - features = soup.find('div', {'class': 'x-about-this-item'}).text - except: - features = "N/A" - product_dict["BulletPoints"] = features - - return product_dict - - -def parse_results_ws(query, page_num=None, verbose=True): - query_string = '+'.join(query.split()) - page_num = 1 if page_num is None else page_num - url = ( - f'{WEBSHOP_URL}/search_results/{WEBSHOP_SESSION}/' - f'{query_string}/{page_num}' - ) - if verbose: - print(f"Search Results URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.content, 'html.parser') - products = soup.findAll('div', {'class': 'list-group-item'}) - - results = [] - for product in products: - asin = product.find('a', {'class': 'product-link'}) - title = product.find('h4', {'class': 'product-title'}) - price = product.find('h5', {'class': 'product-price'}) - - if "\n" in title: - title = title.text.split("\n")[0].strip() - else: - title = title.text.strip().strip("\n") - - if "to" in price.text: - # Parse if price presented as range - prices = price.text.split(" to ") - price = [float(p.strip().strip("\n$")) for p in prices] - else: - price = float(price.text.strip().strip("\n$")) - - results.append({ - "asin": asin.text, - "Title": title, - "Price": price - }) - - if verbose: - print(f"Scraped {len(results)} products") - return results - - -def parse_item_page_ws(asin, query, page_num, options, verbose=True): - product_dict = {} - product_dict["asin"] = asin - - query_string = '+'.join(query.split()) - options_string = json.dumps(options) - url = ( - f'{WEBSHOP_URL}/item_page/{WEBSHOP_SESSION}/' - f'{asin}/{query_string}/{page_num}/{options_string}' - ) - if verbose: - print(f"Item Page URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.content, 'html.parser') - - # Title, Price, Rating, and MainImage - product_dict["Title"] = soup.find('h2').text - - h4_headers = soup.findAll("h4") - for header in h4_headers: - text = header.text - if "Price" in text: - product_dict["Price"] = text.split(":")[1].strip().strip("$") - elif "Rating" in text: - product_dict["Rating"] = text.split(":")[1].strip() - - product_dict["MainImage"] = soup.find('img')['src'] - - # Options - options, options_to_image = {}, {} - option_blocks = soup.findAll("div", {'class': 'radio-toolbar'}) - for block in option_blocks: - name = block.find("input")["name"] - labels = block.findAll("label") - inputs = block.findAll("input") - opt_list = [] 
- for label, input in zip(labels, inputs): - opt = label.text - opt_img_path = input["onclick"].split("href=")[1].strip('\';') - opt_img_url = f'{WEBSHOP_URL}{opt_img_path}' - - opt_list.append(opt) - options_to_image[opt] = opt_img_url - options[name] = opt_list - product_dict["options"] = options - product_dict["option_to_image"] = options_to_image - - # Description - url = ( - f'{WEBSHOP_URL}/item_sub_page/{WEBSHOP_SESSION}/' - f'{asin}/{query_string}/{page_num}/Description/{options_string}' - ) - if verbose: - print(f"Item Description URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.content, 'html.parser') - product_dict["Description"] = soup.find(name="p", attrs={'class': 'product-info'}).text.strip() - - # Features - url = ( - f'{WEBSHOP_URL}/item_sub_page/{WEBSHOP_SESSION}/' - f'{asin}/{query_string}/{page_num}/Features/{options_string}' - ) - if verbose: - print(f"Item Features URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.content, 'html.parser') - bullets = soup.find(name="ul").findAll(name="li") - product_dict["BulletPoints"] = '\n'.join([b.text.strip() for b in bullets]) - - return product_dict - - -# Query -> Search Result ASINs -def parse_results_amz(query, page_num=None, verbose=True): - url = 'https://www.amazon.com/s?k=' + query.replace(" ", "+") - if page_num is not None: - url += "&page=" + str(page_num) - if verbose: - print(f"Search Results URL: {url}") - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - soup = BeautifulSoup(webpage.content, 'html.parser') - products = soup.findAll('div', {'data-component-type': 's-search-result'}) - if products is None: - temp = open(DEBUG_HTML, "w") - temp.write(str(soup)) - temp.close() - raise Exception("Couldn't find search results page, outputted html for inspection") - results = [] - - for product in products[:NUM_PROD_LIMIT]: - asin = product['data-asin'] - title = product.find("h2", {'class': "a-size-mini"}) - price_div = product.find("div", {'class': 's-price-instructions-style'}) - price = price_div.find("span", {'class': 'a-offscreen'}) - - result = { - 'asin': asin, - 'Title': title.text.strip(), - 'Price': price.text.strip().strip("$") - } - results.append(result) - if verbose: - print("Scraped", len(results), "products") - return results - - -# Scrape information of each product -def parse_item_page_amz(asin, verbose=True): - product_dict = {} - product_dict["asin"] = asin - - url = f"https://www.amazon.com/dp/{asin}" - if verbose: - print("Item Page URL:", url) - begin = time.time() - webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) - end = time.time() - if verbose: - print(f"Item page scraping took {end-begin} seconds") - soup = BeautifulSoup(webpage.content, "html.parser") - - # Title - try: - title = soup.find("span", attrs={"id": 'productTitle'}) - title = title.string.strip().replace(',', '') - except AttributeError: - title = "N/A" - product_dict["Title"] = title - - # Price - try: - parent_price_span = soup.find(name="span", class_="apexPriceToPay") - price_span = parent_price_span.find(name="span", class_="a-offscreen") - price = float(price_span.getText().replace("$", "")) - except AttributeError: - price = "N/A" - product_dict["Price"] = price - - # Rating - try: - rating = soup.find(name="span", attrs={"id": 
"acrPopover"}) - if rating is None: - rating = "N/A" - else: - rating = rating.text - except AttributeError: - rating = "N/A" - product_dict["Rating"] = rating.strip("\n").strip() - - # Features - try: - features = soup.find(name="div", attrs={"id": "feature-bullets"}).text - except AttributeError: - features = "N/A" - product_dict["BulletPoints"] = features - - # Description - try: - desc_body = soup.find(name="div", attrs={"id": "productDescription_feature_div"}) - desc_div = desc_body.find(name="div", attrs={"id": "productDescription"}) - desc_ps = desc_div.findAll(name="p") - desc = " ".join([p.text for p in desc_ps]) - except AttributeError: - desc = "N/A" - product_dict["Description"] = desc.strip() - - # Main Image - try: - imgtag = soup.find("img", {"id":"landingImage"}) - imageurl = dict(imgtag.attrs)["src"] - except AttributeError: - imageurl = "" - product_dict["MainImage"] = imageurl - - # Options - options, options_to_image = {}, {} - try: - option_body = soup.find(name='div', attrs={"id": "softlinesTwister_feature_div"}) - if option_body is None: - option_body = soup.find(name='div', attrs={"id": "twister_feature_div"}) - option_blocks = option_body.findAll(name='ul') - for block in option_blocks: - name = json.loads(block["data-a-button-group"])["name"] - # Options - opt_list = [] - for li in block.findAll("li"): - img = li.find(name="img") - if img is not None: - opt = img["alt"].strip() - opt_img = img["src"] - if len(opt) > 0: - options_to_image[opt] = opt_img - else: - opt = li.text.strip() - if len(opt) > 0: - opt_list.append(opt) - options[name.replace("_name", "").replace("twister_", "")] = opt_list - except AttributeError: - options = {} - product_dict["options"], product_dict["option_to_image"] = options, options_to_image - return product_dict - - -# Get text observation from html -# TODO[john-b-yang]: Similar to web_agent_site/envs/...text_env.py func def, merge? 
-def convert_html_to_text(html, simple=False, clicked_options=None, visited_asins=None): - def tag_visible(element): - ignore = {'style', 'script', 'head', 'title', 'meta', '[document]'} - return ( - element.parent.name not in ignore and not isinstance(element, Comment) - ) - html_obj = BeautifulSoup(html, 'html.parser') - texts = html_obj.findAll(text=True) - visible_texts = filter(tag_visible, texts) - if simple: - return ' [SEP] '.join(t.strip() for t in visible_texts if t != '\n') - else: - observation = '' - for t in visible_texts: - if t == '\n': continue - if t.parent.name == 'button': # button - processed_t = f'[button] {t} [button]' - elif t.parent.name == 'label': # options - if f'{t}' in clicked_options: - processed_t = f' [clicked button] {t} [clicked button]' - observation = f'You have clicked {t}.\n' + observation - else: - processed_t = f' [button] {t} [button]' - elif t.parent.get('class') == ["product-link"]: # asins - if f'{t}' in visited_asins: - processed_t = f'\n[clicked button] {t} [clicked button]' - else: - processed_t = f'\n[button] {t} [button]' - else: # regular, unclickable text - processed_t = str(t) - observation += processed_t + '\n' - return observation - - -# Get action from dict of values retrieved from html -def convert_dict_to_actions(page_type, products=None, asin=None, page_num=None) -> dict: - info = {"valid": []} - if page_type == Page.RESULTS: - info["valid"] = ['click[back to search]'] - if products is None or page_num is None: - print(page_num) - print(products) - raise Exception('Provide `products`, `page_num` to get `results` valid actions') - # Decide whether to add `next >` as clickable based on # of search results - if len(products) > 10: - info["valid"].append('click[next >]') - # Add `< prev` as clickable if not first page of search results - if page_num > 1: - info["valid"].append('click[< prev]') - for product in products: - info["valid"].append("click[item - " + product["Title"] + "]") - if page_type == Page.ITEM_PAGE: - if products is None or asin is None: - raise Exception('Provide `products` and `asin` to get `item_page` valid actions') - info["valid"] = ['click[back to search]', 'click[< prev]', 'click[description]',\ - 'click[features]', 'click[buy now]'] # To do: reviews - if "options" in products[asin]: - for key, values in products[asin]["options"].items(): - for value in values: - info["valid"].append("click[" + value + "]") - if page_type == Page.SUB_PAGE: - info["valid"] = ['click[back to search]', 'click[< prev]'] - info['image_feat'] = torch.zeros(512) - return info \ No newline at end of file diff --git a/spaces/weiwandaixu/ChatGPT3.5/chatgpt - windows.bat b/spaces/weiwandaixu/ChatGPT3.5/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- a/spaces/weiwandaixu/ChatGPT3.5/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). 
\ No newline at end of file diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/sales.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/sales.py deleted file mode 100644 index 51b13f4878b83fe0bf38c199ffd8efd3b2b7024d..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/sales.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/25 17:21 -@Author : alexanderwu -@File : sales.py -""" -from metagpt.actions import SearchAndSummarize -from metagpt.roles import Role -from metagpt.tools import SearchEngineType - - -class Sales(Role): - def __init__( - self, - name="Xiaomei", - profile="Retail sales guide", - desc="I am a sales guide in retail. My name is Xiaomei. I will answer some customer questions next, and I " - "will answer questions only based on the information in the knowledge base." - "If I feel that you can't get the answer from the reference material, then I will directly reply that" - " I don't know, and I won't tell you that this is from the knowledge base," - "but pretend to be what I know. Note that each of my replies will be replied in the tone of a " - "professional guide", - store=None - ): - super().__init__(name, profile, desc=desc) - self._set_store(store) - - def _set_store(self, store): - if store: - action = SearchAndSummarize("", engine=SearchEngineType.CUSTOM_ENGINE, search_func=store.search) - else: - action = SearchAndSummarize() - self._init_actions([action]) diff --git a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/learn/__init__.py b/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/learn/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/utils/physics_eval.py b/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/utils/physics_eval.py deleted file mode 100644 index aa14d5f92c6239bf7b86039d5020fa54eb8eae58..0000000000000000000000000000000000000000 --- a/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/utils/physics_eval.py +++ /dev/null @@ -1,295 +0,0 @@ -import sys -import os -import h5py -import torch -import pytorch3d.transforms as tra3d - -from StructDiffusion.utils.rearrangement import show_pcs_color_order -from StructDiffusion.utils.pointnet import random_point_sample, index_points - - -def switch_stdout(stdout_filename=None): - if stdout_filename: - print("setting stdout to {}".format(stdout_filename)) - if os.path.exists(stdout_filename): - sys.stdout = open(stdout_filename, 'a') - else: - sys.stdout = open(stdout_filename, 'w') - else: - sys.stdout = sys.__stdout__ - - -def visualize_batch_pcs(obj_xyzs, B, N, P, verbose=True, limit_B=None): - if limit_B is None: - limit_B = B - - vis_obj_xyzs = obj_xyzs.reshape(B, N, P, -1) - vis_obj_xyzs = vis_obj_xyzs[:limit_B] - - if type(vis_obj_xyzs).__module__ == torch.__name__: - if vis_obj_xyzs.is_cuda: - vis_obj_xyzs = vis_obj_xyzs.detach().cpu() - vis_obj_xyzs = vis_obj_xyzs.numpy() - - for bi, vis_obj_xyz in enumerate(vis_obj_xyzs): - if verbose: - print("example {}".format(bi)) - print(vis_obj_xyz.shape) - show_pcs_color_order([xyz[:, :3] for xyz in vis_obj_xyz], None, visualize=True, add_coordinate_frame=True, add_table=False) - - -def convert_bool(d): - for k in d: - if type(d[k]) == list: - d[k] = [bool(i) for i in d[k]] - else: - d[k] = bool(d[k]) - return d - - -def save_dict_to_h5(dict_data, filename): - fh = h5py.File(filename, 'w') - for k in dict_data: - key_data = dict_data[k] - if 
key_data is None: - raise RuntimeError('data was not properly populated') - # if type(key_data) is dict: - # key_data = json.dumps(key_data, sort_keys=True) - try: - fh.create_dataset(k, data=key_data) - except TypeError as e: - print("Failure on key", k) - print(key_data) - print(e) - raise e - fh.close() - - -def move_pc_and_create_scene_new(obj_xyzs, obj_params, struct_pose, current_pc_pose, target_object_inds, device, - return_scene_pts=False, return_scene_pts_and_pc_idxs=False, num_scene_pts=None, normalize_pc=False, - return_pair_pc=False, num_pair_pc_pts=None, normalize_pair_pc=False): - - # obj_xyzs: N, P, 3 - # obj_params: B, N, 6 - # struct_pose: B x N, 4, 4 - # current_pc_pose: B x N, 4, 4 - # target_object_inds: 1, N - - B, N, _ = obj_params.shape - _, P, _ = obj_xyzs.shape - - # B, N, 6 - flat_obj_params = obj_params.reshape(B * N, -1) - goal_pc_pose_in_struct = torch.eye(4).repeat(B * N, 1, 1).to(device) - goal_pc_pose_in_struct[:, :3, :3] = tra3d.euler_angles_to_matrix(flat_obj_params[:, 3:], "XYZ") - goal_pc_pose_in_struct[:, :3, 3] = flat_obj_params[:, :3] # B x N, 4, 4 - - goal_pc_pose = struct_pose @ goal_pc_pose_in_struct - goal_pc_transform = goal_pc_pose @ torch.inverse(current_pc_pose) # cur_batch_size x N, 4, 4 - - # important: pytorch3d uses row-major ordering, need to transpose each transformation matrix - transpose = tra3d.Transform3d(matrix=goal_pc_transform.transpose(1, 2)) - - # obj_xyzs: N, P, 3 - new_obj_xyzs = obj_xyzs.repeat(B, 1, 1) - new_obj_xyzs = transpose.transform_points(new_obj_xyzs) - - # put it back to B, N, P, 3 - new_obj_xyzs = new_obj_xyzs.reshape(B, N, P, -1) - # visualize_batch_pcs(new_obj_xyzs, S, N, P) - - - # initialize the additional outputs - subsampled_scene_xyz = None - subsampled_pc_idxs = None - obj_pair_xyzs = None - - # =================================== - # Pass to discriminator - if return_scene_pts: - - num_indicator = N - - # add one hot - indicator_variables = torch.eye(num_indicator).repeat(B, 1, 1, P).reshape(B, num_indicator, P, num_indicator).to(device) # B, N, P, N - # print(indicator_variables.shape) - # print(new_obj_xyzs.shape) - new_obj_xyzs = torch.cat([new_obj_xyzs, indicator_variables], dim=-1) # B, N, P, 3 + N - - # combine pcs in each scene - scene_xyzs = new_obj_xyzs.reshape(B, N * P, 3 + N) - - # ToDo: maybe convert this to a batch operation - subsampled_scene_xyz = torch.FloatTensor(B, num_scene_pts, 3 + N).to(device) - for si, scene_xyz in enumerate(scene_xyzs): - # scene_xyz: N*P, 3+N - # target_object_inds: 1, N - subsample_idx = torch.randint(0, torch.sum(target_object_inds[0]) * P, (num_scene_pts,)).to(device) - subsampled_scene_xyz[si] = scene_xyz[subsample_idx] - - # # debug: - # print("-"*50) - # if si < 10: - # trimesh.PointCloud(scene_xyz[:, :3].cpu().numpy(), colors=[255, 0, 0, 255]).show() - # trimesh.PointCloud(subsampled_scene_xyz[si, :, :3].cpu().numpy(), colors=[0, 255, 0, 255]).show() - - # subsampled_scene_xyz: B, num_scene_pts, 3+N - # new_obj_xyzs: B, N, P, 3 - # goal_pc_pose: B, N, 4, 4 - - # important: - if normalize_pc: - subsampled_scene_xyz[:, :, 0:3] = pc_normalize_batch(subsampled_scene_xyz[:, :, 0:3]) - - # # debug: - # for si in range(10): - # trimesh.PointCloud(subsampled_scene_xyz[si, :, :3].cpu().numpy(), colors=[0, 0, 255, 255]).show() - - if return_scene_pts_and_pc_idxs: - num_indicator = N - pc_idxs = torch.arange(0, num_indicator)[:, None].repeat(B, 1, P).reshape(B, num_indicator, P).to(device) # B, N, P - # new_obj_xyzs: B, N, P, 3 + 1 - - # combine pcs in each scene - 
scene_xyzs = new_obj_xyzs.reshape(B, N * P, 3) - pc_idxs = pc_idxs.reshape(B, N*P) - - subsampled_scene_xyz = torch.FloatTensor(B, num_scene_pts, 3).to(device) - subsampled_pc_idxs = torch.LongTensor(B, num_scene_pts).to(device) - for si, (scene_xyz, pc_idx) in enumerate(zip(scene_xyzs, pc_idxs)): - # scene_xyz: N*P, 3+1 - # target_object_inds: 1, N - subsample_idx = torch.randint(0, torch.sum(target_object_inds[0]) * P, (num_scene_pts,)).to(device) - subsampled_scene_xyz[si] = scene_xyz[subsample_idx] - subsampled_pc_idxs[si] = pc_idx[subsample_idx] - - # subsampled_scene_xyz: B, num_scene_pts, 3 - # subsampled_pc_idxs: B, num_scene_pts - # new_obj_xyzs: B, N, P, 3 - # goal_pc_pose: B, N, 4, 4 - - # important: - if normalize_pc: - subsampled_scene_xyz[:, :, 0:3] = pc_normalize_batch(subsampled_scene_xyz[:, :, 0:3]) - - # TODO: visualize each individual object - # debug - # print(subsampled_scene_xyz.shape) - # print(subsampled_pc_idxs.shape) - # print("visualize subsampled scene") - # for si in range(5): - # trimesh.PointCloud(subsampled_scene_xyz[si, :, :3].cpu().numpy(), colors=[0, 0, 255, 255]).show() - - ############################################### - # Create input for pairwise collision detector - if return_pair_pc: - - assert num_pair_pc_pts is not None - - # new_obj_xyzs: B, N, P, 3 + N - # target_object_inds: 1, N - # ignore paddings - num_objs = torch.sum(target_object_inds[0]) - obj_pair_idxs = torch.combinations(torch.arange(num_objs), r=2) # num_comb, 2 - - # use [:, :, :, :3] to get obj_xyzs without object-wise indicator - obj_pair_xyzs = new_obj_xyzs[:, :, :, :3][:, obj_pair_idxs] # B, num_comb, 2 (obj 1 and obj 2), P, 3 - num_comb = obj_pair_xyzs.shape[1] - pair_indicator_variables = torch.eye(2).repeat(B, num_comb, 1, 1, P).reshape(B, num_comb, 2, P, 2).to(device) # B, num_comb, 2, P, 2 - obj_pair_xyzs = torch.cat([obj_pair_xyzs, pair_indicator_variables], dim=-1) # B, num_comb, 2, P, 3 (pc channels) + 2 (indicator for obj 1 and obj 2) - obj_pair_xyzs = obj_pair_xyzs.reshape(B, num_comb, P * 2, 5) - - # random sample: idx = np.random.randint(0, scene_xyz.shape[0], self.num_scene_pts) - obj_pair_xyzs = obj_pair_xyzs.reshape(B * num_comb, P * 2, 5) - # random_point_sample() input dim: B, N, C - rand_idxs = random_point_sample(obj_pair_xyzs, num_pair_pc_pts) # B * num_comb, num_pair_pc_pts - obj_pair_xyzs = index_points(obj_pair_xyzs, rand_idxs) # B * num_comb, num_pair_pc_pts, 5 - - if normalize_pair_pc: - # pc_normalize_batch() input dim: pc: B, num_scene_pts, 3 - # obj_pair_xyzs = obj_pair_xyzs.reshape(B * num_comb, num_pair_pc_pts, 5) - obj_pair_xyzs[:, :, 0:3] = pc_normalize_batch(obj_pair_xyzs[:, :, 0:3]) - obj_pair_xyzs = obj_pair_xyzs.reshape(B, num_comb, num_pair_pc_pts, 5) - - # # debug - # for bi, this_obj_pair_xyzs in enumerate(obj_pair_xyzs): - # print("batch id", bi) - # for pi, obj_pair_xyz in enumerate(this_obj_pair_xyzs): - # print("pair", pi) - # # obj_pair_xyzs: 2 * P, 5 - # print(obj_pair_xyz[:, :3].shape) - # trimesh.PointCloud(obj_pair_xyz[:, :3].cpu()).show() - - # obj_pair_xyzs: B, num_comb, num_pair_pc_pts, 3 + 2 - goal_pc_pose = goal_pc_pose.reshape(B, N, 4, 4) - - return new_obj_xyzs, goal_pc_pose, subsampled_scene_xyz, subsampled_pc_idxs, obj_pair_xyzs - - - -def move_pc(obj_xyzs, obj_params, struct_pose, current_pc_pose, device): - - # obj_xyzs: N, P, 3 - # obj_params: B, N, 6 - # struct_pose: B x N, 4, 4 - # current_pc_pose: B x N, 4, 4 - # target_object_inds: 1, N - - B, N, _ = obj_params.shape - _, P, _ = obj_xyzs.shape - - # B, N, 6 - 
flat_obj_params = obj_params.reshape(B * N, -1) - goal_pc_pose_in_struct = torch.eye(4).repeat(B * N, 1, 1).to(device) - goal_pc_pose_in_struct[:, :3, :3] = tra3d.euler_angles_to_matrix(flat_obj_params[:, 3:], "XYZ") - goal_pc_pose_in_struct[:, :3, 3] = flat_obj_params[:, :3] # B x N, 4, 4 - - goal_pc_pose = struct_pose @ goal_pc_pose_in_struct - goal_pc_transform = goal_pc_pose @ torch.inverse(current_pc_pose) # cur_batch_size x N, 4, 4 - - # important: pytorch3d uses row-major ordering, need to transpose each transformation matrix - transpose = tra3d.Transform3d(matrix=goal_pc_transform.transpose(1, 2)) - - # obj_xyzs: N, P, 3 - new_obj_xyzs = obj_xyzs.repeat(B, 1, 1) - new_obj_xyzs = transpose.transform_points(new_obj_xyzs) - - # put it back to B, N, P, 3 - new_obj_xyzs = new_obj_xyzs.reshape(B, N, P, -1) - # visualize_batch_pcs(new_obj_xyzs, S, N, P) - - # subsampled_scene_xyz: B, num_scene_pts, 3+N - # new_obj_xyzs: B, N, P, 3 - # goal_pc_pose: B, N, 4, 4 - - goal_pc_pose = goal_pc_pose.reshape(B, N, 4, 4) - return new_obj_xyzs, goal_pc_pose - - -def sample_gaussians(mus, sigmas, sample_size): - # mus: [number of individual gaussians] - # sigmas: [number of individual gaussians] - normal = torch.distributions.Normal(mus, sigmas) - samples = normal.sample((sample_size,)) - # samples: [sample_size, number of individual gaussians] - return samples - -def fit_gaussians(samples, sigma_eps=0.01): - device = samples.device - - # samples: [sample_size, number of individual gaussians] - num_gs = samples.shape[1] - mus = torch.mean(samples, dim=0).to(device) - sigmas = torch.std(samples, dim=0).to(device) + sigma_eps * torch.ones(num_gs).to(device) - # mus: [number of individual gaussians] - # sigmas: [number of individual gaussians] - return mus, sigmas - - -def pc_normalize_batch(pc): - # pc: B, num_scene_pts, 3 - centroid = torch.mean(pc, dim=1) # B, 3 - pc = pc - centroid[:, None, :] - m = torch.max(torch.sqrt(torch.sum(pc ** 2, dim=2)), dim=1)[0] - pc = pc / m[:, None, None] - return pc diff --git a/spaces/wuhuik/bingo/src/components/chat-header.tsx b/spaces/wuhuik/bingo/src/components/chat-header.tsx deleted file mode 100644 index c6664b8dee61179f844d45c5bd650518fc2cb4c2..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/chat-header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import LogoIcon from '@/assets/images/logo.svg' -import Image from 'next/image' - -export function ChatHeader() { - return ( -
              - -
              欢迎使用新必应
              -
              由 AI 支持的网页版 Copilot
              -
              - ) -} diff --git a/spaces/xAbdoAT/kandinsky-community-kandinsky-2-2-decoder/README.md b/spaces/xAbdoAT/kandinsky-community-kandinsky-2-2-decoder/README.md deleted file mode 100644 index 79af32cf2f0ea9237d5b320326086fd17b356a8d..0000000000000000000000000000000000000000 --- a/spaces/xAbdoAT/kandinsky-community-kandinsky-2-2-decoder/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Kandinsky Community Kandinsky 2 2 Decoder -emoji: 💻 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/text/__init__.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/xcchen/vits-uma-genshin-honkai/text/__init__.py b/spaces/xcchen/vits-uma-genshin-honkai/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/xcchen/vits-uma-genshin-honkai/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/xswu/HPSv2/configs/controller.sh b/spaces/xswu/HPSv2/configs/controller.sh deleted file mode 100644 index baf5a5fca1a786f4df7f5f49a7211c621d9cc659..0000000000000000000000000000000000000000 --- a/spaces/xswu/HPSv2/configs/controller.sh +++ /dev/null @@ -1,59 +0,0 @@ -exp=${1:-'test'} -gpu=${2:-'1'} -type=${3:-'local'} # choose slurm if you are running on a cluster with slurm scheduler - -if [ "$type" == 'local' ]; then - extra_args=${@:4:99} -else - quotatype=${4:-'auto'} # for slurm - partition=${5:-'1'} # for slurm - extra_args=${@:6:99} - quotatype=spot - partition=YOUR_PARTITION - extra_args=${@:4:99} -fi - -name=${name/#configs/logs} -name=${name//.sh//$exp} -work_dir="${name}" -now=$(date +"%Y%m%d_%H%M%S") -mkdir -p $work_dir - -ncpu='4' - -if [ "$quotatype" == 'reserved_normal' ]; then - quotatype='reserved --phx-priority=${gpu} normal' -fi - -if [ "$type" == 'local' ]; then - - - ava_path=/mnt/afs/xswu/datasets/AVA/images - local_data_path=/mnt/afs/xswu/datasets/preference - local_ava_path=/mnt/afs/xswu/datasets/AVA - local_simulacra_path=/mnt/afs/xswu/datasets/simulacra - local_region_path=/mnt/afs/xswu/datasets/regional_dataset - local_ranking_path=/mnt/afs/xswu/datasets/HPDv2 - local_benchmark_path=/mnt/afs/xswu/datasets/benchmark - local_ImageReward_path=/mnt/afs/xswu/datasets/ImageReward - local_pap_path=/mnt/afs/xswu/datasets/PAP - - header="torchrun --nproc_per_node=${gpu} --nnodes=1 --max_restarts=3 -m src.training.main " - -else - - data_path=s3://preference_images/ - ava_path=s3://AVA/ - simulacra_path=s3://simulacra/ - region_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/regional_dataset/ - local_data_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/human_preference - local_ava_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/AVA - local_simulacra_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/simulacra - local_region_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/regional_dataset - local_ranking_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/ranking_dataset - local_benchmark_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/benchmark - local_ImageReward_path=/mnt/lustre/wuxiaoshi1.vendor/datasets/ImageReward - header="srun --async --partition=$partition -n${gpu} --mpi=pmi2 --gres=gpu:$gpu --ntasks-per-node=${gpu} --quotatype=$quotatype \ - --job-name=$exp --cpus-per-task=$ncpu --kill-on-bad-exit=1 -o local.out python -m src.training.main " - -fi diff --git a/spaces/xuetao/bingo3/src/pages/api/healthz.ts b/spaces/xuetao/bingo3/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/xuetao/bingo3/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git 
a/spaces/xwsm/gpt/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/xwsm/gpt/crazy_functions/test_project/cpp/cppipc/queue.h deleted file mode 100644 index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000 --- a/spaces/xwsm/gpt/crazy_functions/test_project/cpp/cppipc/queue.h +++ /dev/null @@ -1,216 +0,0 @@ -#pragma once - -#include -#include -#include // [[since C++14]]: std::exchange -#include -#include -#include -#include -#include -#include -#include // assert - -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/rw_lock.h" - -#include "libipc/utility/log.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" - -namespace ipc { -namespace detail { - -class queue_conn { -protected: - circ::cc_t connected_ = 0; - shm::handle elems_h_; - - template - Elems* open(char const * name) { - if (name == nullptr || name[0] == '\0') { - ipc::error("fail open waiter: name is empty!\n"); - return nullptr; - } - if (!elems_h_.acquire(name, sizeof(Elems))) { - return nullptr; - } - auto elems = static_cast(elems_h_.get()); - if (elems == nullptr) { - ipc::error("fail acquire elems: %s\n", name); - return nullptr; - } - elems->init(); - return elems; - } - - void close() { - elems_h_.release(); - } - -public: - queue_conn() = default; - queue_conn(const queue_conn&) = delete; - queue_conn& operator=(const queue_conn&) = delete; - - bool connected() const noexcept { - return connected_ != 0; - } - - circ::cc_t connected_id() const noexcept { - return connected_; - } - - template - auto connect(Elems* elems) noexcept - /*needs 'optional' here*/ - -> std::tuple().cursor())> { - if (elems == nullptr) return {}; - // if it's already connected, just return - if (connected()) return {connected(), false, 0}; - connected_ = elems->connect_receiver(); - return {connected(), true, elems->cursor()}; - } - - template - bool disconnect(Elems* elems) noexcept { - if (elems == nullptr) return false; - // if it's already disconnected, just return false - if (!connected()) return false; - elems->disconnect_receiver(std::exchange(connected_, 0)); - return true; - } -}; - -template -class queue_base : public queue_conn { - using base_t = queue_conn; - -public: - using elems_t = Elems; - using policy_t = typename elems_t::policy_t; - -protected: - elems_t * elems_ = nullptr; - decltype(std::declval().cursor()) cursor_ = 0; - bool sender_flag_ = false; - -public: - using base_t::base_t; - - queue_base() = default; - - explicit queue_base(char const * name) - : queue_base{} { - elems_ = open(name); - } - - explicit queue_base(elems_t * elems) noexcept - : queue_base{} { - assert(elems != nullptr); - elems_ = elems; - } - - /* not virtual */ ~queue_base() { - base_t::close(); - } - - elems_t * elems() noexcept { return elems_; } - elems_t const * elems() const noexcept { return elems_; } - - bool ready_sending() noexcept { - if (elems_ == nullptr) return false; - return sender_flag_ || (sender_flag_ = elems_->connect_sender()); - } - - void shut_sending() noexcept { - if (elems_ == nullptr) return; - if (!sender_flag_) return; - elems_->disconnect_sender(); - } - - bool connect() noexcept { - auto tp = base_t::connect(elems_); - if (std::get<0>(tp) && std::get<1>(tp)) { - cursor_ = std::get<2>(tp); - return true; - } - return std::get<0>(tp); - } - - bool disconnect() noexcept { - return base_t::disconnect(elems_); - } - - std::size_t conn_count() const noexcept { - return (elems_ == nullptr) ? 
static_cast(invalid_value) : elems_->conn_count(); - } - - bool valid() const noexcept { - return elems_ != nullptr; - } - - bool empty() const noexcept { - return !valid() || (cursor_ == elems_->cursor()); - } - - template - bool push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward
<P>
              (params)...); - }); - } - - template - bool force_push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->force_push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward
<P>
              (params)...); - }); - } - - template - bool pop(T& item, F&& out) { - if (elems_ == nullptr) { - return false; - } - return elems_->pop(this, &(this->cursor_), [&item](void* p) { - ::new (&item) T(std::move(*static_cast(p))); - }, std::forward(out)); - } -}; - -} // namespace detail - -template -class queue final : public detail::queue_base> { - using base_t = detail::queue_base>; - -public: - using value_t = T; - - using base_t::base_t; - - template - bool push(P&&... params) { - return base_t::template push(std::forward
<P>
              (params)...); - } - - template - bool force_push(P&&... params) { - return base_t::template force_push(std::forward
<P>
              (params)...); - } - - bool pop(T& item) { - return base_t::pop(item, [](bool) {}); - } - - template - bool pop(T& item, F&& out) { - return base_t::pop(item, std::forward(out)); - } -}; - -} // namespace ipc diff --git a/spaces/xwsm/gpt/request_llm/bridge_jittorllms_llama.py b/spaces/xwsm/gpt/request_llm/bridge_jittorllms_llama.py deleted file mode 100644 index 6dfac681aeaa11a780304b9e645637cabd677688..0000000000000000000000000000000000000000 --- a/spaces/xwsm/gpt/request_llm/bridge_jittorllms_llama.py +++ /dev/null @@ -1,178 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.jittorllms_model = None - self.info = "" - self.local_history = [] - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import pandas - self.info = "依赖检测通过" - self.success = True - except: - from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ - r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc() - self.success = False - - def ready(self): - return self.jittorllms_model is not None - - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - env = os.environ.get("PATH", "") - os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') - validate_path() # validate path so you can run from base directory - - def load_model(): - import types - try: - if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') - from .jittorllms.models import get_model - # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] - args_dict = {'model': 'llama'} - print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))') - self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict)) - print('done get model') - except: - self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。') - raise RuntimeError("不能正常加载jittorllms的参数!") - print('load_model') - load_model() - - # 进入任务等待状态 - print('进入任务等待状态') - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - query = kwargs['query'] - history = kwargs['history'] - # 是否重置 - if len(self.local_history) > 0 and len(history)==0: - print('触发重置') - self.jittorllms_model.reset() - self.local_history.append(query) - - print('收到消息,开始请求') - try: - for response in self.jittorllms_model.stream_chat(query, history): - print(response) - self.child.send(response) - except: - from toolbox import trimmed_format_exc - print(trimmed_format_exc()) - self.child.send('[Local Message] Call jittorllms fail.') - # 
请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global llama_glm_handle -llama_glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info - if not llama_glm_handle.success: - error = llama_glm_handle.info - llama_glm_handle = None - raise RuntimeError(error) - - # jittorllms 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - print(response) - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - chatbot.append((inputs, "")) - - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not llama_glm_handle.success: - llama_glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - # 处理历史信息 - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." 
- history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/yaoshining/text-generation-webui/extensions/silero_tts/script.py b/spaces/yaoshining/text-generation-webui/extensions/silero_tts/script.py deleted file mode 100644 index 0d57a90f0748d22b096ac8a5bfa1f62a18fb431c..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/extensions/silero_tts/script.py +++ /dev/null @@ -1,181 +0,0 @@ -import time -from pathlib import Path - -import gradio as gr -import torch -from modules import chat, shared - -from extensions.silero_tts import tts_preprocessor - -torch._C._jit_set_profiling_mode(False) - - -params = { - 'activate': True, - 'speaker': 'en_56', - 'language': 'en', - 'model_id': 'v3_en', - 'sample_rate': 48000, - 'device': 'cpu', - 'show_text': False, - 'autoplay': True, - 'voice_pitch': 'medium', - 'voice_speed': 'medium', - 'local_cache_path': '' # User can override the default cache path to something other via settings.json -} - -current_params = params.copy() -voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115'] -voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high'] -voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast'] - -# Used for making text xml compatible, needed for voice pitch and speed control -table = str.maketrans({ - "<": "<", - ">": ">", - "&": "&", - "'": "'", - '"': """, -}) - - -def xmlesc(txt): - return txt.translate(table) - - -def load_model(): - torch_cache_path = torch.hub.get_dir() if params['local_cache_path'] == '' else params['local_cache_path'] - model_path = torch_cache_path + "/snakers4_silero-models_master/src/silero/model/" + params['model_id'] + ".pt" - if Path(model_path).is_file(): - print(f'\nUsing Silero TTS cached checkpoint found at {torch_cache_path}') - model, example_text = torch.hub.load(repo_or_dir=torch_cache_path + '/snakers4_silero-models_master/', model='silero_tts', language=params['language'], speaker=params['model_id'], source='local', path=model_path, force_reload=True) - else: - print(f'\nSilero TTS cache not found at {torch_cache_path}. 
Attempting to download...') - model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id']) - model.to(params['device']) - return model - - -def remove_tts_from_history(): - for i, entry in enumerate(shared.history['internal']): - shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]] - - -def toggle_text_in_history(): - for i, entry in enumerate(shared.history['visible']): - visible_reply = entry[1] - if visible_reply.startswith('')[0]}\n\n{reply}"] - else: - shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('')[0]}"] - - -def state_modifier(state): - if not params['activate']: - return state - - state['stream'] = False - return state - - -def input_modifier(string): - if not params['activate']: - return string - - shared.processing_message = "*Is recording a voice message...*" - return string - - -def history_modifier(history): - # Remove autoplay from the last reply - if len(history['internal']) > 0: - history['visible'][-1] = [ - history['visible'][-1][0], - history['visible'][-1][1].replace('controls autoplay>', 'controls>') - ] - - return history - - -def output_modifier(string): - global model, current_params, streaming_state - for i in params: - if params[i] != current_params[i]: - model = load_model() - current_params = params.copy() - break - - if not params['activate']: - return string - - original_string = string - string = tts_preprocessor.preprocess(string) - - if string == '': - string = '*Empty reply, try regenerating*' - else: - output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav') - prosody = ''.format(params['voice_speed'], params['voice_pitch']) - silero_input = f'{prosody}{xmlesc(string)}' - model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) - - autoplay = 'autoplay' if params['autoplay'] else '' - string = f'' - if params['show_text']: - string += f'\n\n{original_string}' - - shared.processing_message = "*Is typing...*" - return string - - -def setup(): - global model - model = load_model() - - -def ui(): - # Gradio elements - with gr.Accordion("Silero TTS"): - with gr.Row(): - activate = gr.Checkbox(value=params['activate'], label='Activate TTS') - autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically') - - show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player') - voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice') - with gr.Row(): - v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch') - v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed') - - with gr.Row(): - convert = gr.Button('Permanently replace audios with the message texts') - convert_cancel = gr.Button('Cancel', visible=False) - convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False) - - gr.Markdown('[Click here for Silero audio samples](https://oobabooga.github.io/silero-samples/index.html)') - - # Convert history with confirmation - convert_arr = [convert_confirm, convert, convert_cancel] - convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr) - convert_confirm.click( - lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], 
None, convert_arr).then( - remove_tts_from_history, None, None).then( - chat.save_history, shared.gradio['mode'], None, show_progress=False).then( - chat.redraw_html, shared.reload_inputs, shared.gradio['display']) - - convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) - - # Toggle message text in history - show_text.change( - lambda x: params.update({"show_text": x}), show_text, None).then( - toggle_text_in_history, None, None).then( - chat.save_history, shared.gradio['mode'], None, show_progress=False).then( - chat.redraw_html, shared.reload_inputs, shared.gradio['display']) - - # Event functions to update the parameters in the backend - activate.change(lambda x: params.update({"activate": x}), activate, None) - autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None) - voice.change(lambda x: params.update({"speaker": x}), voice, None) - v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None) - v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/code_llama/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/code_llama/__init__.py deleted file mode 100644 index 8c99c023419bbfa242cf6a5cb39e76abc940b173..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/code_llama/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2023 MetaAI and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_code_llama"] = ["CodeLlamaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_code_llama_fast"] = ["CodeLlamaTokenizerFast"] - -if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_code_llama import CodeLlamaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_code_llama_fast import CodeLlamaTokenizerFast - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_bigcode/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_bigcode/__init__.py deleted file mode 100644 index 33660eb81e4faebb7938bbba7ba165a2d7079d81..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_bigcode/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_bigcode"] = [ - "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", - "GPTBigCodeForSequenceClassification", - "GPTBigCodeForTokenClassification", - "GPTBigCodeForCausalLM", - "GPTBigCodeModel", - "GPTBigCodePreTrainedModel", - ] - -if TYPE_CHECKING: - from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_bigcode import ( - GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, - GPTBigCodeForCausalLM, - GPTBigCodeForSequenceClassification, - GPTBigCodeForTokenClassification, - GPTBigCodeModel, - GPTBigCodePreTrainedModel, - ) - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifigan/nvSTFT.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. 
return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-wrap.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-wrap.js deleted file mode 100644 index 8570476166c37feac0ad8f530bc06627e909543a..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-wrap.js +++ /dev/null @@ -1,19 +0,0 @@ -let flexSpec = require('./flex-spec') -let Declaration = require('../declaration') - -class FlexWrap extends Declaration { - /** - * Don't add prefix for 2009 spec - */ - set(decl, prefix) { - let spec = flexSpec(prefix)[0] - if (spec !== 2009) { - return super.set(decl, prefix) - } - return undefined - } -} - -FlexWrap.names = ['flex-wrap'] - -module.exports = FlexWrap diff --git a/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/rules/index.js b/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/rules/index.js deleted file mode 100644 index 
04aa92f87ef0e915a4e7640ab418cb79a29df8d2..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/nodemon/lib/rules/index.js +++ /dev/null @@ -1,53 +0,0 @@ -'use strict'; -var utils = require('../utils'); -var add = require('./add'); -var parse = require('./parse'); - -// exported -var rules = { ignore: [], watch: [] }; - -/** - * Loads a nodemon config file and populates the ignore - * and watch rules with it's contents, and calls callback - * with the new rules - * - * @param {String} filename - * @param {Function} callback - */ -function load(filename, callback) { - parse(filename, function (err, result) { - if (err) { - // we should have bombed already, but - utils.log.error(err); - callback(err); - } - - if (result.raw) { - result.raw.forEach(add.bind(null, rules, 'ignore')); - } else { - result.ignore.forEach(add.bind(null, rules, 'ignore')); - result.watch.forEach(add.bind(null, rules, 'watch')); - } - - callback(null, rules); - }); -} - -module.exports = { - reset: function () { // just used for testing - rules.ignore.length = rules.watch.length = 0; - delete rules.ignore.re; - delete rules.watch.re; - }, - load: load, - ignore: { - test: add.bind(null, rules, 'ignore'), - add: add.bind(null, rules, 'ignore'), - }, - watch: { - test: add.bind(null, rules, 'watch'), - add: add.bind(null, rules, 'watch'), - }, - add: add.bind(null, rules), - rules: rules, -}; \ No newline at end of file diff --git a/spaces/zhang-wei-jian/docker/node_modules/to-regex-range/README.md b/spaces/zhang-wei-jian/docker/node_modules/to-regex-range/README.md deleted file mode 100644 index 38887dafa1b41ef777de6d6f3658685eb5fd8b2f..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/to-regex-range/README.md +++ /dev/null @@ -1,305 +0,0 @@ -# to-regex-range [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=W8YFZ425KND68) [![NPM version](https://img.shields.io/npm/v/to-regex-range.svg?style=flat)](https://www.npmjs.com/package/to-regex-range) [![NPM monthly downloads](https://img.shields.io/npm/dm/to-regex-range.svg?style=flat)](https://npmjs.org/package/to-regex-range) [![NPM total downloads](https://img.shields.io/npm/dt/to-regex-range.svg?style=flat)](https://npmjs.org/package/to-regex-range) [![Linux Build Status](https://img.shields.io/travis/micromatch/to-regex-range.svg?style=flat&label=Travis)](https://travis-ci.org/micromatch/to-regex-range) - -> Pass two numbers, get a regex-compatible source string for matching ranges. Validated against more than 2.78 million test assertions. - -Please consider following this project's author, [Jon Schlinkert](https://github.com/jonschlinkert), and consider starring the project to show your :heart: and support. - -## Install - -Install with [npm](https://www.npmjs.com/): - -```sh -$ npm install --save to-regex-range -``` - -

              -What does this do? - -
              - -This library generates the `source` string to be passed to `new RegExp()` for matching a range of numbers. - -**Example** - -```js -const toRegexRange = require('to-regex-range'); -const regex = new RegExp(toRegexRange('15', '95')); -``` - -A string is returned so that you can do whatever you need with it before passing it to `new RegExp()` (like adding `^` or `$` boundaries, defining flags, or combining it with another string). - -
              - -
              - -
              -Why use this library? - -
              - -### Convenience - -Creating regular expressions for matching numbers gets deceptively complicated pretty fast. - -For example, let's say you need a validation regex for matching part of a user-id, postal code, social security number, tax id, etc: - -* regex for matching `1` => `/1/` (easy enough) -* regex for matching `1` through `5` => `/[1-5]/` (not bad...) -* regex for matching `1` or `5` => `/(1|5)/` (still easy...) -* regex for matching `1` through `50` => `/([1-9]|[1-4][0-9]|50)/` (uh-oh...) -* regex for matching `1` through `55` => `/([1-9]|[1-4][0-9]|5[0-5])/` (no prob, I can do this...) -* regex for matching `1` through `555` => `/([1-9]|[1-9][0-9]|[1-4][0-9]{2}|5[0-4][0-9]|55[0-5])/` (maybe not...) -* regex for matching `0001` through `5555` => `/(0{3}[1-9]|0{2}[1-9][0-9]|0[1-9][0-9]{2}|[1-4][0-9]{3}|5[0-4][0-9]{2}|55[0-4][0-9]|555[0-5])/` (okay, I get the point!) - -The numbers are contrived, but they're also really basic. In the real world you might need to generate a regex on-the-fly for validation. - -**Learn more** - -If you're interested in learning more about [character classes](http://www.regular-expressions.info/charclass.html) and other regex features, I personally have always found [regular-expressions.info](http://www.regular-expressions.info/charclass.html) to be pretty useful. - -### Heavily tested - -As of April 07, 2019, this library runs [>1m test assertions](./test/test.js) against generated regex-ranges to provide brute-force verification that results are correct. - -Tests run in ~280ms on my MacBook Pro, 2.5 GHz Intel Core i7. - -### Optimized - -Generated regular expressions are optimized: - -* duplicate sequences and character classes are reduced using quantifiers -* smart enough to use `?` conditionals when number(s) or range(s) can be positive or negative -* uses fragment caching to avoid processing the same exact string more than once - -
              - -
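To make the quantifier optimization described above concrete, here is a minimal usage sketch; the expected output is copied from the Examples table further down in this readme, so the only assumption is the `require` setup shown in the Usage section.

```js
const toRegexRange = require('to-regex-range');

// Repeated [0-9] character classes are collapsed into {n,m} quantifiers,
// so the range 1..5555 compiles down to a handful of short alternatives.
console.log(toRegexRange('1', '5555'));
//=> [1-9]|[1-9][0-9]{1,2}|[1-4][0-9]{3}|5[0-4][0-9]{2}|55[0-4][0-9]|555[0-5]
```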
              - -## Usage - -Add this library to your javascript application with the following line of code - -```js -const toRegexRange = require('to-regex-range'); -``` - -The main export is a function that takes two integers: the `min` value and `max` value (formatted as strings or numbers). - -```js -const source = toRegexRange('15', '95'); -//=> 1[5-9]|[2-8][0-9]|9[0-5] - -const regex = new RegExp(`^${source}$`); -console.log(regex.test('14')); //=> false -console.log(regex.test('50')); //=> true -console.log(regex.test('94')); //=> true -console.log(regex.test('96')); //=> false -``` - -## Options - -### options.capture - -**Type**: `boolean` - -**Deafault**: `undefined` - -Wrap the returned value in parentheses when there is more than one regex condition. Useful when you're dynamically generating ranges. - -```js -console.log(toRegexRange('-10', '10')); -//=> -[1-9]|-?10|[0-9] - -console.log(toRegexRange('-10', '10', { capture: true })); -//=> (-[1-9]|-?10|[0-9]) -``` - -### options.shorthand - -**Type**: `boolean` - -**Deafault**: `undefined` - -Use the regex shorthand for `[0-9]`: - -```js -console.log(toRegexRange('0', '999999')); -//=> [0-9]|[1-9][0-9]{1,5} - -console.log(toRegexRange('0', '999999', { shorthand: true })); -//=> \d|[1-9]\d{1,5} -``` - -### options.relaxZeros - -**Type**: `boolean` - -**Default**: `true` - -This option relaxes matching for leading zeros when when ranges are zero-padded. - -```js -const source = toRegexRange('-0010', '0010'); -const regex = new RegExp(`^${source}$`); -console.log(regex.test('-10')); //=> true -console.log(regex.test('-010')); //=> true -console.log(regex.test('-0010')); //=> true -console.log(regex.test('10')); //=> true -console.log(regex.test('010')); //=> true -console.log(regex.test('0010')); //=> true -``` - -When `relaxZeros` is false, matching is strict: - -```js -const source = toRegexRange('-0010', '0010', { relaxZeros: false }); -const regex = new RegExp(`^${source}$`); -console.log(regex.test('-10')); //=> false -console.log(regex.test('-010')); //=> false -console.log(regex.test('-0010')); //=> true -console.log(regex.test('10')); //=> false -console.log(regex.test('010')); //=> false -console.log(regex.test('0010')); //=> true -``` - -## Examples - -| **Range** | **Result** | **Compile time** | -| --- | --- | --- | -| `toRegexRange(-10, 10)` | `-[1-9]\|-?10\|[0-9]` | _132μs_ | -| `toRegexRange(-100, -10)` | `-1[0-9]\|-[2-9][0-9]\|-100` | _50μs_ | -| `toRegexRange(-100, 100)` | `-[1-9]\|-?[1-9][0-9]\|-?100\|[0-9]` | _42μs_ | -| `toRegexRange(001, 100)` | `0{0,2}[1-9]\|0?[1-9][0-9]\|100` | _109μs_ | -| `toRegexRange(001, 555)` | `0{0,2}[1-9]\|0?[1-9][0-9]\|[1-4][0-9]{2}\|5[0-4][0-9]\|55[0-5]` | _51μs_ | -| `toRegexRange(0010, 1000)` | `0{0,2}1[0-9]\|0{0,2}[2-9][0-9]\|0?[1-9][0-9]{2}\|1000` | _31μs_ | -| `toRegexRange(1, 50)` | `[1-9]\|[1-4][0-9]\|50` | _24μs_ | -| `toRegexRange(1, 55)` | `[1-9]\|[1-4][0-9]\|5[0-5]` | _23μs_ | -| `toRegexRange(1, 555)` | `[1-9]\|[1-9][0-9]\|[1-4][0-9]{2}\|5[0-4][0-9]\|55[0-5]` | _30μs_ | -| `toRegexRange(1, 5555)` | `[1-9]\|[1-9][0-9]{1,2}\|[1-4][0-9]{3}\|5[0-4][0-9]{2}\|55[0-4][0-9]\|555[0-5]` | _43μs_ | -| `toRegexRange(111, 555)` | `11[1-9]\|1[2-9][0-9]\|[2-4][0-9]{2}\|5[0-4][0-9]\|55[0-5]` | _38μs_ | -| `toRegexRange(29, 51)` | `29\|[34][0-9]\|5[01]` | _24μs_ | -| `toRegexRange(31, 877)` | `3[1-9]\|[4-9][0-9]\|[1-7][0-9]{2}\|8[0-6][0-9]\|87[0-7]` | _32μs_ | -| `toRegexRange(5, 5)` | `5` | _8μs_ | -| `toRegexRange(5, 6)` | `5\|6` | _11μs_ | -| `toRegexRange(1, 2)` | `1\|2` | _6μs_ | 
-| `toRegexRange(1, 5)` | `[1-5]` | _15μs_ | -| `toRegexRange(1, 10)` | `[1-9]\|10` | _22μs_ | -| `toRegexRange(1, 100)` | `[1-9]\|[1-9][0-9]\|100` | _25μs_ | -| `toRegexRange(1, 1000)` | `[1-9]\|[1-9][0-9]{1,2}\|1000` | _31μs_ | -| `toRegexRange(1, 10000)` | `[1-9]\|[1-9][0-9]{1,3}\|10000` | _34μs_ | -| `toRegexRange(1, 100000)` | `[1-9]\|[1-9][0-9]{1,4}\|100000` | _36μs_ | -| `toRegexRange(1, 1000000)` | `[1-9]\|[1-9][0-9]{1,5}\|1000000` | _42μs_ | -| `toRegexRange(1, 10000000)` | `[1-9]\|[1-9][0-9]{1,6}\|10000000` | _42μs_ | - -## Heads up! - -**Order of arguments** - -When the `min` is larger than the `max`, values will be flipped to create a valid range: - -```js -toRegexRange('51', '29'); -``` - -Is effectively flipped to: - -```js -toRegexRange('29', '51'); -//=> 29|[3-4][0-9]|5[0-1] -``` - -**Steps / increments** - -This library does not support steps (increments). A PR to add support would be welcome. - -## History - -### v2.0.0 - 2017-04-21 - -**New features** - -Adds support for zero-padding! - -### v1.0.0 - -**Optimizations** - -Repeating ranges are now grouped using quantifiers. Processing time is roughly the same, but the generated regex is much smaller, which should result in faster matching. - -## Attribution - -Inspired by the Python library [range-regex](https://github.com/dimka665/range-regex). - -## About - -
              -Contributing - -Pull requests and stars are always welcome. For bugs and feature requests, [please create an issue](../../issues/new). - -
              - -
              -Running Tests - -Running and reviewing unit tests is a great way to get familiarized with a library and its API. You can install dependencies and run tests with the following command: - -```sh -$ npm install && npm test -``` - -
              - -
              -Building docs - -_(This project's readme.md is generated by [verb](https://github.com/verbose/verb-generate-readme), please don't edit the readme directly. Any changes to the readme must be made in the [.verb.md](.verb.md) readme template.)_ - -To generate the readme, run the following command: - -```sh -$ npm install -g verbose/verb#dev verb-generate-readme && verb -``` - -
              - -### Related projects - -You might also be interested in these projects: - -* [expand-range](https://www.npmjs.com/package/expand-range): Fast, bash-like range expansion. Expand a range of numbers or letters, uppercase or lowercase. Used… [more](https://github.com/jonschlinkert/expand-range) | [homepage](https://github.com/jonschlinkert/expand-range "Fast, bash-like range expansion. Expand a range of numbers or letters, uppercase or lowercase. Used by micromatch.") -* [fill-range](https://www.npmjs.com/package/fill-range): Fill in a range of numbers or letters, optionally passing an increment or `step` to… [more](https://github.com/jonschlinkert/fill-range) | [homepage](https://github.com/jonschlinkert/fill-range "Fill in a range of numbers or letters, optionally passing an increment or `step` to use, or create a regex-compatible range with `options.toRegex`") -* [micromatch](https://www.npmjs.com/package/micromatch): Glob matching for javascript/node.js. A drop-in replacement and faster alternative to minimatch and multimatch. | [homepage](https://github.com/micromatch/micromatch "Glob matching for javascript/node.js. A drop-in replacement and faster alternative to minimatch and multimatch.") -* [repeat-element](https://www.npmjs.com/package/repeat-element): Create an array by repeating the given value n times. | [homepage](https://github.com/jonschlinkert/repeat-element "Create an array by repeating the given value n times.") -* [repeat-string](https://www.npmjs.com/package/repeat-string): Repeat the given string n times. Fastest implementation for repeating a string. | [homepage](https://github.com/jonschlinkert/repeat-string "Repeat the given string n times. Fastest implementation for repeating a string.") - -### Contributors - -| **Commits** | **Contributor** | -| --- | --- | -| 63 | [jonschlinkert](https://github.com/jonschlinkert) | -| 3 | [doowb](https://github.com/doowb) | -| 2 | [realityking](https://github.com/realityking) | - -### Author - -**Jon Schlinkert** - -* [GitHub Profile](https://github.com/jonschlinkert) -* [Twitter Profile](https://twitter.com/jonschlinkert) -* [LinkedIn Profile](https://linkedin.com/in/jonschlinkert) - -Please consider supporting me on Patreon, or [start your own Patreon page](https://patreon.com/invite/bxpbvm)! - - - - - -### License - -Copyright © 2019, [Jon Schlinkert](https://github.com/jonschlinkert). -Released under the [MIT License](LICENSE). - -*** - -_This file was generated by [verb-generate-readme](https://github.com/verbose/verb-generate-readme), v0.8.0, on April 07, 2019._ \ No newline at end of file diff --git a/spaces/zhenwusw/JoJoGAN/e4e/editings/latent_editor.py b/spaces/zhenwusw/JoJoGAN/e4e/editings/latent_editor.py deleted file mode 100644 index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/editings/latent_editor.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import sys -sys.path.append(".") -sys.path.append("..") -from editings import ganspace, sefa -from utils.common import tensor2im - - -class LatentEditor(object): - def __init__(self, stylegan_generator, is_cars=False): - self.generator = stylegan_generator - self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output. 
- - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return self._latents_to_image(edit_latents) - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. For example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return self._latents_to_image(edit_latents) - - def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs): - edit_latents = sefa.edit(self.generator, latent, indices, **kwargs) - return self._latents_to_image(edit_latents) - - # Currently, in order to apply StyleFlow editings, one should run inference, - # save the latent codes and load them from the official StyleFlow repository. - # def apply_styleflow(self): - # pass - - def _latents_to_image(self, latents): - with torch.no_grad(): - images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True) - if self.is_cars: - images = images[:, :, 64:448, :] # 512x512 -> 384x512 - horizontal_concat_image = torch.cat(list(images), 2) - final_image = tensor2im(horizontal_concat_image) - return final_image diff --git a/spaces/zomehwh/vits-models-ow2/monotonic_align/__init__.py b/spaces/zomehwh/vits-models-ow2/monotonic_align/__init__.py deleted file mode 100644 index e97eecc595dd3bd97d0104ec62799e2e5efea57c..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-ow2/monotonic_align/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype)
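
A minimal sketch of how the `maximum_path` helper above is typically called, assuming the `monotonic_align` package (with its compiled `core` providing `maximum_path_jit`) is importable; the tensor sizes are illustrative only, with the first non-batch axis the longer one, as in VITS-style models.

```python
import torch

# Assumes the monotonic_align package above (with its numba-compiled core) is on the import path.
from monotonic_align import maximum_path

# Illustrative sizes only: batch of 1, t_t = 6, t_s = 4.
b, t_t, t_s = 1, 6, 4
neg_cent = torch.randn(b, t_t, t_s)   # alignment scores, [b, t_t, t_s]
mask = torch.ones(b, t_t, t_s)        # 1 where both sequences are valid (no padding in this toy case)

path = maximum_path(neg_cent, mask)   # 0/1 values marking the best monotonic alignment
print(path.shape)                     # torch.Size([1, 6, 4]); same shape, dtype and device as neg_cent
```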

              If you are looking for a professional audio editing and mastering software, you may have heard of Steinberg WaveLab 6.1.1.353. This is a powerful and versatile program that can help you create high-quality audio projects for digital distribution, CD, vinyl, and more.


              FULL Steinberg WaveLab 6.1.1.353 (Cracked By TEAM AiR) [RH]






              Steinberg WaveLab 6.1.1.353 is the latest version of WaveLab, a complete mastering solution that has been the number one choice for mastering professionals for over 25 years. It has every tool that you will ever need to cover your workflow from start to finish.


              Some of the features and benefits of Steinberg WaveLab 6.1.1.353 are:

• It supports 16-bit, 24-bit, and 32-bit audio files with high-quality resampling and dithering.
• It supports various sample rates and formats, including WAV, AIFF, MP3, OGG Vorbis, FLAC, AAC, WMA, etc.
• It supports LADSPA, LV2, Nyquist, VST, and Audio Unit effect plug-ins.
• It has a fast and intuitive user interface with customizable layouts.
• It has a plug-in manager that handles plug-in installation and addition/removal of plug-ins.
• It has a powerful audio editor that allows you to edit audio files with non-destructive and destructive operations.
• It has a comprehensive audio analysis toolset that includes spectrogram view mode, plot spectrum window, level meter, phase scope, etc.
• It has a flexible audio montage feature that allows you to create complex audio projects with multiple tracks, clips, regions, crossfades, etc.
• It has a batch processor that allows you to apply effects and conversions to multiple files at once.
• It has a CD/DVD burner that allows you to create audio CDs and DVDs with CD-Text, ISRC codes, etc.

              With Steinberg WaveLab 6.1.1.353, you can achieve professional results for your audio projects with ease and efficiency. Whether you are a musician, producer, engineer, or podcaster, you will find this software to be an indispensable tool for your work.

              Why You Should Avoid Downloading Cracked Software

              However, as tempting as it may sound, you should avoid downloading cracked software from the internet. Cracked software is software that has been modified or hacked to bypass the license verification or activation process. It is often distributed by unauthorized sources such as torrent sites, file-sharing platforms, or online forums.


              Downloading cracked software is not only illegal but also risky and unethical. Here are some of the reasons why you should avoid using cracked software:


              Malware and Security Risks

              One of the biggest dangers of using cracked software is malware. Malware is malicious software that can harm your computer or compromise your data. It can include viruses, worms, trojans, spyware, ransomware, adware, etc.


              Cracked software can contain malware that can infect your computer when you download or install it. The malware can then perform various malicious activities such as:

• Stealing your personal information such as passwords, credit card numbers, bank accounts, etc.
• Encrypting your files and demanding a ransom to unlock them.
• Downloading more malware or unwanted programs on your computer.
• Displaying annoying ads or pop-ups on your screen.
• Slowing down your computer or crashing it.
• Damaging your hardware or corrupting your system files.

              You may not even notice that your computer is infected until it is too late. Malware can be hidden or disguised as legitimate files or programs. It can also evade detection by antivirus software or firewalls.

              Legal Problems

              Another reason to avoid using cracked software is legal problems. Cracked software is software that has been obtained or distributed without the permission of the software owner or developer. This is a violation of the software copyright law and can result in serious penalties.


              According to the law, software piracy is a form of theft and can be punished by fines, imprisonment, or both. The penalties may vary depending on the country, the type and amount of software involved, and the intention and extent of the infringement. For example, in the United States, the maximum penalty for software piracy is $150,000 per infringement and up to five years in prison. In some cases, you may also face civil lawsuits from the software owner or developer, who can claim damages for lost revenue, reputation, or goodwill.


              By using cracked software, you are not only breaking the law but also disrespecting the hard work and creativity of the software owner or developer. You are depriving them of their rightful income and incentive to create more quality software. You are also harming the software industry and the economy as a whole.

              Poor Performance

              A final reason to avoid using cracked software is poor performance. Cracked software may not work properly, lack updates, or have missing or corrupted functions. This can affect your productivity and quality of your work.


              Cracked software may not work properly because it has been tampered with or modified by hackers or crackers. It may have bugs, errors, glitches, or compatibility issues that can cause crashes, freezes, or failures. It may also have features or functions that are disabled, removed, or replaced by malware or unwanted programs.


              Cracked software may lack updates because it cannot connect to the official website or server of the software owner or developer. It may not receive security patches, bug fixes, performance improvements, or new features that are regularly released by the software owner or developer. It may also not be compatible with newer versions of operating systems, hardware, or other software.


              Cracked software may have missing or corrupted functions because it has been damaged or altered by malware or unwanted programs. It may have functions that are incomplete, incorrect, or inaccessible. It may also have functions that are replaced by malicious code that can harm your computer or data.


              By using cracked software, you are risking your work quality and efficiency. You are also wasting your time and money on a software that does not deliver what it promises.

              How to Get Steinberg WaveLab 6.1.1.353 Legally and Safely

              Now that you know the dangers and disadvantages of using cracked software, you may be wondering how to get Steinberg WaveLab 6.1.1.353 legally and safely. The answer is simple: you need to purchase and download the software from the official website of Steinberg, and install and activate it with a valid license key.


              Here are the steps that you need to follow to get Steinberg WaveLab 6.1.1.353 legally and safely:

              Purchase and Download

              The first step is to purchase and download the software from the official website of Steinberg. Here is how to do it:

1. Go to https://www.steinberg.net/en/shop/buy_product/product/wavelab-6.html, which is the product page of Steinberg WaveLab 6.1.1.353.
2. Choose the version of the software that suits your needs and budget. You can choose between WaveLab Pro, WaveLab Elements, or WaveLab LE, depending on the features and functions that you want to use.
3. Add the software to your cart by clicking on the "Add to Cart" button.
4. Review your cart and proceed to checkout by clicking on the "Proceed to Checkout" button.
5. Enter your personal and payment information, and agree to the terms and conditions.
6. Complete your order by clicking on the "Place Order" button.
7. Check your email for a confirmation message that contains your order number, invoice, and download link.
8. Click on the download link and save the installer file on your computer.

              Install and Activate

              The second step is to install and activate the software with a valid license key. Here is how to do it:

1. Run the installer file that you downloaded from the email.
2. Follow the prompts and instructions on the screen to install the software on your computer.
3. Launch the software for the first time.
4. Enter your license key that you received from the email or from your Steinberg account.
5. Click on the "Activate" button to activate the software.
6. Enjoy using Steinberg WaveLab 6.1.1.353 legally and safely!

              How to Use Steinberg WaveLab 6.1.1.353 Effectively

              The third step is to use Steinberg WaveLab 6.1.1.353 effectively for your audio editing and mastering projects. The software has many features and tools that can help you achieve professional results with ease and efficiency. Here are some of the main features and tools that you should know how to use:

              Recording Audio

              One of the features of Steinberg WaveLab 6.1.1.353 is recording audio. You can record live audio through a microphone or mixer, or digitize recordings from other media such as cassette tapes, vinyl records, or CDs. Here is how to record audio:

1. Connect your audio source to your computer's sound card or audio interface.
2. Launch Steinberg WaveLab 6.1.1.353 and create a new audio file by clicking on the "New" button on the toolbar.
3. Select the sample rate, bit depth, and format of your audio file.
4. Select the input device and channel that you want to record from.
5. Adjust the input level and monitor the signal with the level meter.
6. Click on the "Record" button to start recording.
7. Click on the "Stop" button to stop recording.
8. Save your audio file by clicking on the "Save" button on the toolbar.

              Editing Audio

              Another feature of Steinberg WaveLab 6.1.1.353 is editing audio. You can edit audio files with cut, copy, paste, delete, undo, redo, and other commands. You can also use non-destructive editing, which means that you can make changes to your audio files without affecting the original data. Here is how to edit audio:

1. Open an audio file that you want to edit by clicking on the "Open" button on the toolbar.
2. Select the part of the audio file that you want to edit by clicking and dragging on the waveform display.
3. Use the commands on the "Edit" menu or the toolbar to perform editing operations such as cut, copy, paste, delete, etc.
4. Use the commands on the "Undo/Redo" menu or the toolbar to undo or redo your editing actions.
5. Save your edited audio file by clicking on the "Save" button on the toolbar.

              Applying Effects

              A third feature of Steinberg WaveLab 6.1.1.353 is applying effects. You can apply effects such as LADSPA, LV2, Nyquist, VST, and Audio Unit plug-ins to your audio files. You can also preview the effects in real-time before applying them. Here is how to apply effects:

1. Open an audio file that you want to apply effects to by clicking on the "Open" button on the toolbar.
2. Select the part of the audio file that you want to apply effects to by clicking and dragging on the waveform display.
3. Click on the "Effects" menu and choose the effect that you want to use.
4. Adjust the parameters and settings of the effect according to your preference.
5. Click on the "Preview" button to listen to how the effect sounds on your audio file.
6. Click on the "Apply" button to apply the effect to your audio file.
7. Save your audio file with the effect by clicking on the "Save" button on the toolbar.

              Analyzing Audio

              A fourth feature of Steinberg WaveLab 6.1.1.353 is analyzing audio. You can use various tools for visualizing and selecting frequencies in your audio files. You can also measure the loudness, peak level, phase, and other properties of your audio files. Here is how to analyze audio:

1. Open an audio file that you want to analyze by clicking on the "Open" button on the toolbar.
2. Click on the "View" menu and choose the view mode that you want to use. You can choose between waveform, spectrogram, or both.
3. Use the tools on the "Analysis" menu or the toolbar to perform analysis operations such as plot spectrum, level meter, phase scope, etc.
4. Use the mouse cursor or the selection tool to select a specific frequency range or region in your audio file.
5. Use the commands on the "Edit" menu or the toolbar to perform editing operations based on your analysis results such as filter, amplify, normalize, etc.
6. Save your audio file after analyzing and editing it by clicking on the "Save" button on the toolbar.

              Exporting Audio

              A fifth feature of Steinberg WaveLab 6.1.1.353 is exporting audio. You can export your audio files in different formats and quality settings for various purposes such as digital distribution, CD burning, or streaming. Here is how to export audio:

1. Open an audio file that you want to export by clicking on the "Open" button on the toolbar.
2. Select the part of the audio file that you want to export by clicking and dragging on the waveform display.
3. Click on the "File" menu and choose the "Export" option.
4. Select the format and quality settings that you want to use for your exported audio file.
5. Choose a destination folder and a file name for your exported audio file.
6. Click on the "Export" button to start exporting your audio file.

              Conclusion

              In conclusion, Steinberg WaveLab 6.1.1.353 is a professional audio editing and mastering software that can help you create high-quality audio projects for various purposes. It has many features and tools that can cover your workflow from start to finish.


              However, you should avoid downloading cracked software from the internet, as it can expose you to malware, security risks, legal problems, and poor performance. Instead, you should purchase and download the software from the official website of Steinberg, and install and activate it with a valid license key.


              By doing so, you can enjoy using Steinberg WaveLab 6.1.1.353 legally and safely, and achieve professional results for your audio projects with ease and efficiency.


              Thank you for reading this article. I hope you found it useful and informative. If you have any questions or comments, please feel free to leave them below.

              FAQs

              Here are some of the frequently asked questions and answers about Steinberg WaveLab 6.1.1.353:

• Q: How much does Steinberg WaveLab 6.1.1.353 cost?
• A: The price of Steinberg WaveLab 6.1.1.353 depends on the version that you choose. The WaveLab Pro version costs $599.99, the WaveLab Elements version costs $99.99, and the WaveLab LE version costs $49.99.
• Q: What are the system requirements for Steinberg WaveLab 6.1.1.353?
• A: The system requirements for Steinberg WaveLab 6.1.1.353 are as follows:
  • Operating system: Windows 7 or higher, Mac OS X 10.11 or higher
  • Processor: Intel or AMD dual core CPU or higher
  • Memory: 4 GB RAM or higher
  • Hard disk space: 4 GB free space or higher
  • Sound card: ASIO compatible sound card or audio interface
  • Display: 1024 x 768 resolution or higher
  • Internet connection: Required for activation, registration, and updates
• Q: Where can I find more information or support for Steinberg WaveLab 6.1.1.353?
• A: You can find more information or support for Steinberg WaveLab 6.1.1.353 on the official website of Steinberg at https://www.steinberg.net/en/support.html. You can also access the online help, user manual, tutorials, forums, and other resources from within the software.
• Q: What are some of the alternatives to Steinberg WaveLab 6.1.1.353?
• A: Some of the alternatives to Steinberg WaveLab 6.1.1.353 are Audacity, Adobe Audition, Sound Forge, Reaper, and Pro Tools.
• Q: How can I upgrade to a newer version of Steinberg WaveLab?
• A: You can upgrade to a newer version of Steinberg WaveLab by purchasing an upgrade license from the official website of Steinberg at https://www.steinberg.net/en/shop/buy_product/product/wavelab-6.html. You can also check for updates from within the software.